1 """Python wrappers around TensorFlow ops.
   2 
   3 This file is MACHINE GENERATED! Do not edit.
   4 Original C++ source file: nn_ops.cc
   5 """
   6 
   7 import collections as _collections
   8 import six as _six
   9 
  10 from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
  11 from tensorflow.python.eager import context as _context
  12 from tensorflow.python.eager import core as _core
  13 from tensorflow.python.eager import execute as _execute
  14 from tensorflow.python.framework import dtypes as _dtypes
  15 from tensorflow.python.framework import errors as _errors
  16 from tensorflow.python.framework import tensor_shape as _tensor_shape
  17 
  18 from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
  19 # Needed to trigger the call to _set_call_cpp_shape_fn.
  20 from tensorflow.python.framework import common_shapes as _common_shapes
  21 from tensorflow.python.framework import op_def_registry as _op_def_registry
  22 from tensorflow.python.framework import ops as _ops
  23 from tensorflow.python.framework import op_def_library as _op_def_library
  24 from tensorflow.python.util.deprecation import deprecated_endpoints
  25 from tensorflow.python.util.tf_export import tf_export
  26 
  27 
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Performs average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Args:
    value: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape `[batch, height, width, channels]`.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the sliding window for each dimension of `value`.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of `value`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `value`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): validate the attrs in Python and
  # build an AvgPool node through the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'avg_pool' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'avg_pool' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    _, _, _op = _op_def_lib._apply_op_helper(
        "AvgPool", value=value, ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Read the attrs back from the created op so the recorded gradient sees
    # the canonical (post-conversion) values.
    _attrs = ("ksize", _op.get_attr("ksize"), "strides",
              _op.get_attr("strides"), "padding", _op.get_attr("padding"),
              "data_format", _op.get_attr("data_format"), "T",
              _op.get_attr("T"))
    _execute.record_gradient(
      "AvgPool", _inputs_flat, _attrs, _result, name)
    # AvgPool has exactly one output; unpack it.
    _result, = _result
    return _result

  else:
    # Eager mode: try the fast C++ execution path first. The argument order
    # here is a fixed protocol expected by TFE_Py_FastPathExecute.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "AvgPool",
        name, _ctx._post_execution_callbacks, value, "ksize", ksize,
        "strides", strides, "padding", padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      # The fast path declined these arguments; retry through the slower
      # Python fallback.
      return avg_pool_eager_fallback(
          value, ksize=ksize, strides=strides, padding=padding,
          data_format=data_format, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface runtime failures as Python exceptions, appending the op
      # name (when given) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 101 
 102 
 103 def avg_pool_eager_fallback(value, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
 104   r"""This is the slowpath function for Eager mode.
 105   This is for function avg_pool
 106   """
 107   _ctx = ctx if ctx else _context.context()
 108   if not isinstance(ksize, (list, tuple)):
 109     raise TypeError(
 110         "Expected list for 'ksize' argument to "
 111         "'avg_pool' Op, not %r." % ksize)
 112   ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
 113   if not isinstance(strides, (list, tuple)):
 114     raise TypeError(
 115         "Expected list for 'strides' argument to "
 116         "'avg_pool' Op, not %r." % strides)
 117   strides = [_execute.make_int(_i, "strides") for _i in strides]
 118   padding = _execute.make_str(padding, "padding")
 119   if data_format is None:
 120     data_format = "NHWC"
 121   data_format = _execute.make_str(data_format, "data_format")
 122   _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
 123   _inputs_flat = [value]
 124   _attrs = ("ksize", ksize, "strides", strides, "padding", padding,
 125   "data_format", data_format, "T", _attr_T)
 126   _result = _execute.execute(b"AvgPool", 1, inputs=_inputs_flat, attrs=_attrs,
 127                              ctx=_ctx, name=name)
 128   _execute.record_gradient(
 129       "AvgPool", _inputs_flat, _attrs, _result, name)
 130   _result, = _result
 131   return _result
 132 
 133 
@tf_export('nn.avg_pool3d')
def avg_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
  r"""Performs 3D average pooling on the input.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
    ksize: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The size of the window for each dimension of
      the input tensor. Must have `ksize[0] = ksize[4] = 1`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): validate the attrs in Python and
  # build an AvgPool3D node through the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'avg_pool3d' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'avg_pool3d' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NDHWC"
    data_format = _execute.make_str(data_format, "data_format")
    _, _, _op = _op_def_lib._apply_op_helper(
        "AvgPool3D", input=input, ksize=ksize, strides=strides,
        padding=padding, data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Read the attrs back from the created op so the recorded gradient sees
    # the canonical (post-conversion) values.
    _attrs = ("ksize", _op.get_attr("ksize"), "strides",
              _op.get_attr("strides"), "padding", _op.get_attr("padding"),
              "data_format", _op.get_attr("data_format"), "T",
              _op.get_attr("T"))
    _execute.record_gradient(
      "AvgPool3D", _inputs_flat, _attrs, _result, name)
    # AvgPool3D has exactly one output; unpack it.
    _result, = _result
    return _result

  else:
    # Eager mode: try the fast C++ execution path first. The argument order
    # here is a fixed protocol expected by TFE_Py_FastPathExecute.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "AvgPool3D",
        name, _ctx._post_execution_callbacks, input, "ksize", ksize,
        "strides", strides, "padding", padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      # The fast path declined these arguments; retry through the slower
      # Python fallback.
      return avg_pool3d_eager_fallback(
          input, ksize=ksize, strides=strides, padding=padding,
          data_format=data_format, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface runtime failures as Python exceptions, appending the op
      # name (when given) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 207 
 208 
 209 def avg_pool3d_eager_fallback(input, ksize, strides, padding, data_format="NDHWC", name=None, ctx=None):
 210   r"""This is the slowpath function for Eager mode.
 211   This is for function avg_pool3d
 212   """
 213   _ctx = ctx if ctx else _context.context()
 214   if not isinstance(ksize, (list, tuple)):
 215     raise TypeError(
 216         "Expected list for 'ksize' argument to "
 217         "'avg_pool3d' Op, not %r." % ksize)
 218   ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
 219   if not isinstance(strides, (list, tuple)):
 220     raise TypeError(
 221         "Expected list for 'strides' argument to "
 222         "'avg_pool3d' Op, not %r." % strides)
 223   strides = [_execute.make_int(_i, "strides") for _i in strides]
 224   padding = _execute.make_str(padding, "padding")
 225   if data_format is None:
 226     data_format = "NDHWC"
 227   data_format = _execute.make_str(data_format, "data_format")
 228   _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
 229   _inputs_flat = [input]
 230   _attrs = ("ksize", ksize, "strides", strides, "padding", padding,
 231   "data_format", data_format, "T", _attr_T)
 232   _result = _execute.execute(b"AvgPool3D", 1, inputs=_inputs_flat,
 233                              attrs=_attrs, ctx=_ctx, name=name)
 234   _execute.record_gradient(
 235       "AvgPool3D", _inputs_flat, _attrs, _result, name)
 236   _result, = _result
 237   return _result
 238 
 239 
def avg_pool3d_grad(orig_input_shape, grad, ksize, strides, padding, data_format="NDHWC", name=None):
  r"""Computes gradients of average pooling function.

  Args:
    orig_input_shape: A `Tensor` of type `int32`.
      The original input dimensions.
    grad: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      Output backprop of shape `[batch, depth, rows, cols, channels]`.
    ksize: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The size of the window for each dimension of
      the input tensor. Must have `ksize[0] = ksize[4] = 1`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `grad`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): validate the attrs in Python and
  # build an AvgPool3DGrad node through the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'avg_pool3d_grad' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'avg_pool3d_grad' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NDHWC"
    data_format = _execute.make_str(data_format, "data_format")
    _, _, _op = _op_def_lib._apply_op_helper(
        "AvgPool3DGrad", orig_input_shape=orig_input_shape, grad=grad,
        ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Read the attrs back from the created op so the recorded gradient sees
    # the canonical (post-conversion) values.
    _attrs = ("ksize", _op.get_attr("ksize"), "strides",
              _op.get_attr("strides"), "padding", _op.get_attr("padding"),
              "data_format", _op.get_attr("data_format"), "T",
              _op.get_attr("T"))
    _execute.record_gradient(
      "AvgPool3DGrad", _inputs_flat, _attrs, _result, name)
    # AvgPool3DGrad has exactly one output; unpack it.
    _result, = _result
    return _result

  else:
    # Eager mode: try the fast C++ execution path first. The argument order
    # here is a fixed protocol expected by TFE_Py_FastPathExecute.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "AvgPool3DGrad", name, _ctx._post_execution_callbacks,
        orig_input_shape, grad, "ksize", ksize, "strides", strides, "padding",
        padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      # The fast path declined these arguments; retry through the slower
      # Python fallback.
      return avg_pool3d_grad_eager_fallback(
          orig_input_shape, grad, ksize=ksize, strides=strides,
          padding=padding, data_format=data_format, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface runtime failures as Python exceptions, appending the op
      # name (when given) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 316 
 317 
 318 def avg_pool3d_grad_eager_fallback(orig_input_shape, grad, ksize, strides, padding, data_format="NDHWC", name=None, ctx=None):
 319   r"""This is the slowpath function for Eager mode.
 320   This is for function avg_pool3d_grad
 321   """
 322   _ctx = ctx if ctx else _context.context()
 323   if not isinstance(ksize, (list, tuple)):
 324     raise TypeError(
 325         "Expected list for 'ksize' argument to "
 326         "'avg_pool3d_grad' Op, not %r." % ksize)
 327   ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
 328   if not isinstance(strides, (list, tuple)):
 329     raise TypeError(
 330         "Expected list for 'strides' argument to "
 331         "'avg_pool3d_grad' Op, not %r." % strides)
 332   strides = [_execute.make_int(_i, "strides") for _i in strides]
 333   padding = _execute.make_str(padding, "padding")
 334   if data_format is None:
 335     data_format = "NDHWC"
 336   data_format = _execute.make_str(data_format, "data_format")
 337   _attr_T, (grad,) = _execute.args_to_matching_eager([grad], _ctx)
 338   orig_input_shape = _ops.convert_to_tensor(orig_input_shape, _dtypes.int32)
 339   _inputs_flat = [orig_input_shape, grad]
 340   _attrs = ("ksize", ksize, "strides", strides, "padding", padding,
 341   "data_format", data_format, "T", _attr_T)
 342   _result = _execute.execute(b"AvgPool3DGrad", 1, inputs=_inputs_flat,
 343                              attrs=_attrs, ctx=_ctx, name=name)
 344   _execute.record_gradient(
 345       "AvgPool3DGrad", _inputs_flat, _attrs, _result, name)
 346   _result, = _result
 347   return _result
 348 
 349 
def avg_pool_grad(orig_input_shape, grad, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Computes gradients of the average pooling function.

  Args:
    orig_input_shape: A `Tensor` of type `int32`.
      1-D.  Shape of the original input to `avg_pool`.
    grad: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.
      the output of `avg_pool`.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the sliding window for each dimension of the input.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the input.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `grad`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): validate the attrs in Python and
  # build an AvgPoolGrad node through the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'avg_pool_grad' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'avg_pool_grad' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    _, _, _op = _op_def_lib._apply_op_helper(
        "AvgPoolGrad", orig_input_shape=orig_input_shape, grad=grad,
        ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Read the attrs back from the created op so the recorded gradient sees
    # the canonical (post-conversion) values.
    _attrs = ("ksize", _op.get_attr("ksize"), "strides",
              _op.get_attr("strides"), "padding", _op.get_attr("padding"),
              "data_format", _op.get_attr("data_format"), "T",
              _op.get_attr("T"))
    _execute.record_gradient(
      "AvgPoolGrad", _inputs_flat, _attrs, _result, name)
    # AvgPoolGrad has exactly one output; unpack it.
    _result, = _result
    return _result

  else:
    # Eager mode: try the fast C++ execution path first. The argument order
    # here is a fixed protocol expected by TFE_Py_FastPathExecute.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "AvgPoolGrad",
        name, _ctx._post_execution_callbacks, orig_input_shape, grad, "ksize",
        ksize, "strides", strides, "padding", padding, "data_format",
        data_format)
      return _result
    except _core._FallbackException:
      # The fast path declined these arguments; retry through the slower
      # Python fallback.
      return avg_pool_grad_eager_fallback(
          orig_input_shape, grad, ksize=ksize, strides=strides,
          padding=padding, data_format=data_format, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface runtime failures as Python exceptions, appending the op
      # name (when given) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 425 
 426 
 427 def avg_pool_grad_eager_fallback(orig_input_shape, grad, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
 428   r"""This is the slowpath function for Eager mode.
 429   This is for function avg_pool_grad
 430   """
 431   _ctx = ctx if ctx else _context.context()
 432   if not isinstance(ksize, (list, tuple)):
 433     raise TypeError(
 434         "Expected list for 'ksize' argument to "
 435         "'avg_pool_grad' Op, not %r." % ksize)
 436   ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
 437   if not isinstance(strides, (list, tuple)):
 438     raise TypeError(
 439         "Expected list for 'strides' argument to "
 440         "'avg_pool_grad' Op, not %r." % strides)
 441   strides = [_execute.make_int(_i, "strides") for _i in strides]
 442   padding = _execute.make_str(padding, "padding")
 443   if data_format is None:
 444     data_format = "NHWC"
 445   data_format = _execute.make_str(data_format, "data_format")
 446   _attr_T, (grad,) = _execute.args_to_matching_eager([grad], _ctx)
 447   orig_input_shape = _ops.convert_to_tensor(orig_input_shape, _dtypes.int32)
 448   _inputs_flat = [orig_input_shape, grad]
 449   _attrs = ("ksize", ksize, "strides", strides, "padding", padding,
 450   "data_format", data_format, "T", _attr_T)
 451   _result = _execute.execute(b"AvgPoolGrad", 1, inputs=_inputs_flat,
 452                              attrs=_attrs, ctx=_ctx, name=name)
 453   _execute.record_gradient(
 454       "AvgPoolGrad", _inputs_flat, _attrs, _result, name)
 455   _result, = _result
 456   return _result
 457 
 458 
def _batch_norm_with_global_normalization(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None):
  r"""Batch normalization.

  This op is deprecated. Prefer `tf.nn.batch_normalization`.

  Args:
    t: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A 4D input Tensor.
    m: A `Tensor`. Must have the same type as `t`.
      A 1D mean Tensor with size matching the last dimension of t.
      This is the first output from tf.nn.moments,
      or a saved moving average thereof.
    v: A `Tensor`. Must have the same type as `t`.
      A 1D variance Tensor with size matching the last dimension of t.
      This is the second output from tf.nn.moments,
      or a saved moving average thereof.
    beta: A `Tensor`. Must have the same type as `t`.
      A 1D beta Tensor with size matching the last dimension of t.
      An offset to be added to the normalized tensor.
    gamma: A `Tensor`. Must have the same type as `t`.
      A 1D gamma Tensor with size matching the last dimension of t.
      If "scale_after_normalization" is true, this tensor will be multiplied
      with the normalized tensor.
    variance_epsilon: A `float`. A small float number to avoid dividing by 0.
    scale_after_normalization: A `bool`.
      A bool indicating whether the resulted tensor
      needs to be multiplied with gamma.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `t`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): coerce the scalar attrs and build
  # a BatchNormWithGlobalNormalization node through the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
    scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchNormWithGlobalNormalization", t=t, m=m, v=v, beta=beta,
        gamma=gamma, variance_epsilon=variance_epsilon,
        scale_after_normalization=scale_after_normalization, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Read the attrs back from the created op so the recorded gradient sees
    # the canonical (post-conversion) values.
    _attrs = ("T", _op.get_attr("T"), "variance_epsilon",
              _op.get_attr("variance_epsilon"), "scale_after_normalization",
              _op.get_attr("scale_after_normalization"))
    _execute.record_gradient(
      "BatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result, name)
    # Single-output op; unpack it.
    _result, = _result
    return _result

  else:
    # Eager mode: try the fast C++ execution path first. The argument order
    # here is a fixed protocol expected by TFE_Py_FastPathExecute.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "BatchNormWithGlobalNormalization", name,
        _ctx._post_execution_callbacks, t, m, v, beta, gamma,
        "variance_epsilon", variance_epsilon, "scale_after_normalization",
        scale_after_normalization)
      return _result
    except _core._FallbackException:
      # The fast path declined these arguments; retry through the slower
      # Python fallback.
      return _batch_norm_with_global_normalization_eager_fallback(
          t, m, v, beta, gamma, variance_epsilon=variance_epsilon,
          scale_after_normalization=scale_after_normalization, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface runtime failures as Python exceptions, appending the op
      # name (when given) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 529 
 530 
 531 def _batch_norm_with_global_normalization_eager_fallback(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None, ctx=None):
 532   r"""This is the slowpath function for Eager mode.
 533   This is for function _batch_norm_with_global_normalization
 534   """
 535   _ctx = ctx if ctx else _context.context()
 536   variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
 537   scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
 538   _attr_T, _inputs_T = _execute.args_to_matching_eager([t, m, v, beta, gamma], _ctx)
 539   (t, m, v, beta, gamma) = _inputs_T
 540   _inputs_flat = [t, m, v, beta, gamma]
 541   _attrs = ("T", _attr_T, "variance_epsilon", variance_epsilon,
 542   "scale_after_normalization", scale_after_normalization)
 543   _result = _execute.execute(b"BatchNormWithGlobalNormalization", 1,
 544                              inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
 545                              name=name)
 546   _execute.record_gradient(
 547       "BatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result, name)
 548   _result, = _result
 549   return _result
 550 
 551 
 552 _batch_norm_with_global_normalization_grad_outputs = ["dx", "dm", "dv", "db",
 553                                                      "dg"]
 554 _BatchNormWithGlobalNormalizationGradOutput = _collections.namedtuple(
 555     "BatchNormWithGlobalNormalizationGrad",
 556     _batch_norm_with_global_normalization_grad_outputs)
 557 
 558 
def batch_norm_with_global_normalization_grad(t, m, v, gamma, backprop, variance_epsilon, scale_after_normalization, name=None):
  r"""Gradients for batch normalization.

  This op is deprecated. See `tf.nn.batch_normalization`.

  Args:
    t: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A 4D input Tensor.
    m: A `Tensor`. Must have the same type as `t`.
      A 1D mean Tensor with size matching the last dimension of t.
      This is the first output from tf.nn.moments,
      or a saved moving average thereof.
    v: A `Tensor`. Must have the same type as `t`.
      A 1D variance Tensor with size matching the last dimension of t.
      This is the second output from tf.nn.moments,
      or a saved moving average thereof.
    gamma: A `Tensor`. Must have the same type as `t`.
      A 1D gamma Tensor with size matching the last dimension of t.
      If "scale_after_normalization" is true, this Tensor will be multiplied
      with the normalized Tensor.
    backprop: A `Tensor`. Must have the same type as `t`. 4D backprop Tensor.
    variance_epsilon: A `float`. A small float number to avoid dividing by 0.
    scale_after_normalization: A `bool`.
      A bool indicating whether the resulted tensor
      needs to be multiplied with gamma.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (dx, dm, dv, db, dg).

    dx: A `Tensor`. Has the same type as `t`.
    dm: A `Tensor`. Has the same type as `t`.
    dv: A `Tensor`. Has the same type as `t`.
    db: A `Tensor`. Has the same type as `t`.
    dg: A `Tensor`. Has the same type as `t`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): coerce the scalar attrs and build
  # a BatchNormWithGlobalNormalizationGrad node through the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
    scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchNormWithGlobalNormalizationGrad", t=t, m=m, v=v, gamma=gamma,
        backprop=backprop, variance_epsilon=variance_epsilon,
        scale_after_normalization=scale_after_normalization, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Read the attrs back from the created op so the recorded gradient sees
    # the canonical (post-conversion) values.
    _attrs = ("T", _op.get_attr("T"), "variance_epsilon",
              _op.get_attr("variance_epsilon"), "scale_after_normalization",
              _op.get_attr("scale_after_normalization"))
    _execute.record_gradient(
      "BatchNormWithGlobalNormalizationGrad", _inputs_flat, _attrs, _result, name)
    # Wrap the five outputs (dx, dm, dv, db, dg) in the named tuple.
    _result = _BatchNormWithGlobalNormalizationGradOutput._make(_result)
    return _result

  else:
    # Eager mode: try the fast C++ execution path first. The argument order
    # here is a fixed protocol expected by TFE_Py_FastPathExecute.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "BatchNormWithGlobalNormalizationGrad", name,
        _ctx._post_execution_callbacks, t, m, v, gamma, backprop,
        "variance_epsilon", variance_epsilon, "scale_after_normalization",
        scale_after_normalization)
      # Wrap the five outputs (dx, dm, dv, db, dg) in the named tuple.
      _result = _BatchNormWithGlobalNormalizationGradOutput._make(_result)
      return _result
    except _core._FallbackException:
      # The fast path declined these arguments; retry through the slower
      # Python fallback.
      return batch_norm_with_global_normalization_grad_eager_fallback(
          t, m, v, gamma, backprop, variance_epsilon=variance_epsilon,
          scale_after_normalization=scale_after_normalization, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface runtime failures as Python exceptions, appending the op
      # name (when given) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 634 
 635 
 636 def batch_norm_with_global_normalization_grad_eager_fallback(t, m, v, gamma, backprop, variance_epsilon, scale_after_normalization, name=None, ctx=None):
 637   r"""This is the slowpath function for Eager mode.
 638   This is for function batch_norm_with_global_normalization_grad
 639   """
 640   _ctx = ctx if ctx else _context.context()
 641   variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
 642   scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
 643   _attr_T, _inputs_T = _execute.args_to_matching_eager([t, m, v, gamma, backprop], _ctx)
 644   (t, m, v, gamma, backprop) = _inputs_T
 645   _inputs_flat = [t, m, v, gamma, backprop]
 646   _attrs = ("T", _attr_T, "variance_epsilon", variance_epsilon,
 647   "scale_after_normalization", scale_after_normalization)
 648   _result = _execute.execute(b"BatchNormWithGlobalNormalizationGrad", 5,
 649                              inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
 650                              name=name)
 651   _execute.record_gradient(
 652       "BatchNormWithGlobalNormalizationGrad", _inputs_flat, _attrs, _result, name)
 653   _result = _BatchNormWithGlobalNormalizationGradOutput._make(_result)
 654   return _result
 655 
 656 
def bias_add(value, bias, data_format="NHWC", name=None):
  r"""Adds `bias` to `value`.

  This is a special case of `tf.add` where `bias` is restricted to be 1-D.
  Broadcasting is supported, so `value` may have any number of dimensions.

  Args:
    value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Any number of dimensions.
    bias: A `Tensor`. Must have the same type as `value`.
      1-D with size the last dimension of `value`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the bias tensor will be added to the last dimension
      of the value tensor.
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
      The tensor will be added to "in_channels", the third-to-the-last
          dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `value`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): canonicalize attrs in Python and
  # register a BiasAdd node through the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    _, _, _op = _op_def_lib._apply_op_helper(
        "BiasAdd", value=value, bias=bias, data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are re-read from the created op so the recorded gradient sees
    # the op's canonical attribute values.
    _attrs = ("T", _op.get_attr("T"), "data_format",
              _op.get_attr("data_format"))
    _execute.record_gradient(
      "BiasAdd", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; it raises _FallbackException
    # when it cannot handle these inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BiasAdd",
        name, _ctx._post_execution_callbacks, value, bias, "data_format",
        data_format)
      return _result
    except _core._FallbackException:
      return bias_add_eager_fallback(
          value, bias, data_format=data_format, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise status errors as Python exceptions, appending the
      # user-supplied op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 713 
 714 
 715 def bias_add_eager_fallback(value, bias, data_format="NHWC", name=None, ctx=None):
 716   r"""This is the slowpath function for Eager mode.
 717   This is for function bias_add
 718   """
 719   _ctx = ctx if ctx else _context.context()
 720   if data_format is None:
 721     data_format = "NHWC"
 722   data_format = _execute.make_str(data_format, "data_format")
 723   _attr_T, _inputs_T = _execute.args_to_matching_eager([value, bias], _ctx)
 724   (value, bias) = _inputs_T
 725   _inputs_flat = [value, bias]
 726   _attrs = ("T", _attr_T, "data_format", data_format)
 727   _result = _execute.execute(b"BiasAdd", 1, inputs=_inputs_flat, attrs=_attrs,
 728                              ctx=_ctx, name=name)
 729   _execute.record_gradient(
 730       "BiasAdd", _inputs_flat, _attrs, _result, name)
 731   _result, = _result
 732   return _result
 733 
 734 
def bias_add_grad(out_backprop, data_format="NHWC", name=None):
  r"""The backward operation for "BiasAdd" on the "bias" tensor.

  It accumulates all the values from out_backprop into the feature dimension.
  For NHWC data format, the feature dimension is the last. For NCHW data format,
  the feature dimension is the third-to-last.

  Args:
    out_backprop: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Any number of dimensions.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the bias tensor will be added to the last dimension
      of the value tensor.
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
      The tensor will be added to "in_channels", the third-to-the-last
          dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `out_backprop`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): canonicalize attrs and register a
  # BiasAddGrad node through the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    _, _, _op = _op_def_lib._apply_op_helper(
        "BiasAddGrad", out_backprop=out_backprop, data_format=data_format,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read attrs from the created op so the recorded gradient sees the
    # op's canonical attribute values.
    _attrs = ("T", _op.get_attr("T"), "data_format",
              _op.get_attr("data_format"))
    _execute.record_gradient(
      "BiasAddGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path; fall back to the Python slowpath when
    # it raises _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BiasAddGrad",
        name, _ctx._post_execution_callbacks, out_backprop, "data_format",
        data_format)
      return _result
    except _core._FallbackException:
      return bias_add_grad_eager_fallback(
          out_backprop, data_format=data_format, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface status errors as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 791 
 792 
 793 def bias_add_grad_eager_fallback(out_backprop, data_format="NHWC", name=None, ctx=None):
 794   r"""This is the slowpath function for Eager mode.
 795   This is for function bias_add_grad
 796   """
 797   _ctx = ctx if ctx else _context.context()
 798   if data_format is None:
 799     data_format = "NHWC"
 800   data_format = _execute.make_str(data_format, "data_format")
 801   _attr_T, (out_backprop,) = _execute.args_to_matching_eager([out_backprop], _ctx)
 802   _inputs_flat = [out_backprop]
 803   _attrs = ("T", _attr_T, "data_format", data_format)
 804   _result = _execute.execute(b"BiasAddGrad", 1, inputs=_inputs_flat,
 805                              attrs=_attrs, ctx=_ctx, name=name)
 806   _execute.record_gradient(
 807       "BiasAddGrad", _inputs_flat, _attrs, _result, name)
 808   _result, = _result
 809   return _result
 810 
 811 
def bias_add_v1(value, bias, name=None):
  r"""Adds `bias` to `value`.

  This is a deprecated version of BiasAdd and will be soon removed.

  This is a special case of `tf.add` where `bias` is restricted to be 1-D.
  Broadcasting is supported, so `value` may have any number of dimensions.

  Args:
    value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Any number of dimensions.
    bias: A `Tensor`. Must have the same type as `value`.
      1-D with size the last dimension of `value`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `value`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): register a BiasAddV1 node through
  # the op-def library. Unlike BiasAdd, this op has no data_format attr.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BiasAddV1", value=value, bias=bias, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "BiasAddV1", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path; fall back to the Python slowpath when
    # it raises _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BiasAddV1",
        name, _ctx._post_execution_callbacks, value, bias)
      return _result
    except _core._FallbackException:
      return bias_add_v1_eager_fallback(
          value, bias, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface status errors as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 857 
 858 
 859 def bias_add_v1_eager_fallback(value, bias, name=None, ctx=None):
 860   r"""This is the slowpath function for Eager mode.
 861   This is for function bias_add_v1
 862   """
 863   _ctx = ctx if ctx else _context.context()
 864   _attr_T, _inputs_T = _execute.args_to_matching_eager([value, bias], _ctx)
 865   (value, bias) = _inputs_T
 866   _inputs_flat = [value, bias]
 867   _attrs = ("T", _attr_T)
 868   _result = _execute.execute(b"BiasAddV1", 1, inputs=_inputs_flat,
 869                              attrs=_attrs, ctx=_ctx, name=name)
 870   _execute.record_gradient(
 871       "BiasAddV1", _inputs_flat, _attrs, _result, name)
 872   _result, = _result
 873   return _result
 874 
 875 
@tf_export('nn.conv2d')
def conv2d(input, filter, strides, padding, use_cudnn_on_gpu=True, data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.

  Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
  and a filter / kernel tensor of shape
  `[filter_height, filter_width, in_channels, out_channels]`, this op
  performs the following:

  1. Flattens the filter to a 2-D matrix with shape
     `[filter_height * filter_width * in_channels, output_channels]`.
  2. Extracts image patches from the input tensor to form a *virtual*
     tensor of shape `[batch, out_height, out_width,
     filter_height * filter_width * in_channels]`.
  3. For each patch, right-multiplies the filter matrix and the image patch
     vector.

  In detail, with the default NHWC format,

      output[b, i, j, k] =
          sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
                          filter[di, dj, q, k]

  Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
  horizontal and vertices strides, `strides = [1, stride, stride, 1]`.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      A 4-D tensor. The dimension order is interpreted according to the value
      of `data_format`, see below for details.
    filter: A `Tensor`. Must have the same type as `input`.
      A 4-D tensor of shape
      `[filter_height, filter_width, in_channels, out_channels]`
    strides: A list of `ints`.
      1-D tensor of length 4.  The stride of the sliding window for each
      dimension of `input`. The dimension order is determined by the value of
      `data_format`, see below for details.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, channels, height, width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4.  The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by the
      value of `data_format`, see above for details. Dilations in the batch and
      depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): validate and canonicalize the
  # attributes in Python, then register a Conv2D node via the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'conv2d' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if use_cudnn_on_gpu is None:
      use_cudnn_on_gpu = True
    use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if dilations is None:
      dilations = [1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
      raise TypeError(
          "Expected list for 'dilations' argument to "
          "'conv2d' Op, not %r." % dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    _, _, _op = _op_def_lib._apply_op_helper(
        "Conv2D", input=input, filter=filter, strides=strides,
        padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu,
        data_format=data_format, dilations=dilations, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are re-read from the created op so the recorded gradient sees
    # the op's canonical attribute values.
    _attrs = ("T", _op.get_attr("T"), "strides", _op.get_attr("strides"),
              "use_cudnn_on_gpu", _op.get_attr("use_cudnn_on_gpu"), "padding",
              _op.get_attr("padding"), "data_format",
              _op.get_attr("data_format"), "dilations",
              _op.get_attr("dilations"))
    _execute.record_gradient(
      "Conv2D", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; it raises _FallbackException
    # when it cannot handle these inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Conv2D", name,
        _ctx._post_execution_callbacks, input, filter, "strides", strides,
        "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", padding,
        "data_format", data_format, "dilations", dilations)
      return _result
    except _core._FallbackException:
      return conv2d_eager_fallback(
          input, filter, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu,
          padding=padding, data_format=data_format, dilations=dilations,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface status errors as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 989 
 990 
 991 def conv2d_eager_fallback(input, filter, strides, padding, use_cudnn_on_gpu=True, data_format="NHWC", dilations=[1, 1, 1, 1], name=None, ctx=None):
 992   r"""This is the slowpath function for Eager mode.
 993   This is for function conv2d
 994   """
 995   _ctx = ctx if ctx else _context.context()
 996   if not isinstance(strides, (list, tuple)):
 997     raise TypeError(
 998         "Expected list for 'strides' argument to "
 999         "'conv2d' Op, not %r." % strides)
1000   strides = [_execute.make_int(_i, "strides") for _i in strides]
1001   padding = _execute.make_str(padding, "padding")
1002   if use_cudnn_on_gpu is None:
1003     use_cudnn_on_gpu = True
1004   use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
1005   if data_format is None:
1006     data_format = "NHWC"
1007   data_format = _execute.make_str(data_format, "data_format")
1008   if dilations is None:
1009     dilations = [1, 1, 1, 1]
1010   if not isinstance(dilations, (list, tuple)):
1011     raise TypeError(
1012         "Expected list for 'dilations' argument to "
1013         "'conv2d' Op, not %r." % dilations)
1014   dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
1015   _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], _ctx)
1016   (input, filter) = _inputs_T
1017   _inputs_flat = [input, filter]
1018   _attrs = ("T", _attr_T, "strides", strides, "use_cudnn_on_gpu",
1019   use_cudnn_on_gpu, "padding", padding, "data_format", data_format,
1020   "dilations", dilations)
1021   _result = _execute.execute(b"Conv2D", 1, inputs=_inputs_flat, attrs=_attrs,
1022                              ctx=_ctx, name=name)
1023   _execute.record_gradient(
1024       "Conv2D", _inputs_flat, _attrs, _result, name)
1025   _result, = _result
1026   return _result
1027 
1028 
@tf_export('nn.conv2d_backprop_filter')
def conv2d_backprop_filter(input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu=True, data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  r"""Computes the gradients of convolution with respect to the filter.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape `[batch, in_height, in_width, in_channels]`.
    filter_sizes: A `Tensor` of type `int32`.
      An integer vector representing the tensor shape of `filter`,
      where `filter` is a 4-D
      `[filter_height, filter_width, in_channels, out_channels]` tensor.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      of the convolution. Must be in the same order as the dimension specified with
      format.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4.  The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each filter
      element on that dimension. The dimension order is determined by the value of
      `data_format`, see above for details. Dilations in the batch and depth
      dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): validate and canonicalize attrs,
  # then register a Conv2DBackpropFilter node via the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'conv2d_backprop_filter' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if use_cudnn_on_gpu is None:
      use_cudnn_on_gpu = True
    use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if dilations is None:
      dilations = [1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
      raise TypeError(
          "Expected list for 'dilations' argument to "
          "'conv2d_backprop_filter' Op, not %r." % dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    _, _, _op = _op_def_lib._apply_op_helper(
        "Conv2DBackpropFilter", input=input, filter_sizes=filter_sizes,
        out_backprop=out_backprop, strides=strides, padding=padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format,
        dilations=dilations, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are re-read from the created op so the recorded gradient sees
    # the op's canonical attribute values.
    _attrs = ("T", _op.get_attr("T"), "strides", _op.get_attr("strides"),
              "use_cudnn_on_gpu", _op.get_attr("use_cudnn_on_gpu"), "padding",
              _op.get_attr("padding"), "data_format",
              _op.get_attr("data_format"), "dilations",
              _op.get_attr("dilations"))
    _execute.record_gradient(
      "Conv2DBackpropFilter", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; it raises _FallbackException
    # when it cannot handle these inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "Conv2DBackpropFilter", name, _ctx._post_execution_callbacks, input,
        filter_sizes, out_backprop, "strides", strides, "use_cudnn_on_gpu",
        use_cudnn_on_gpu, "padding", padding, "data_format", data_format,
        "dilations", dilations)
      return _result
    except _core._FallbackException:
      return conv2d_backprop_filter_eager_fallback(
          input, filter_sizes, out_backprop, strides=strides,
          use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding,
          data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface status errors as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1125 
1126 
def conv2d_backprop_filter_eager_fallback(input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu=True, data_format="NHWC", dilations=[1, 1, 1, 1], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv2d_backprop_filter
  """
  eager_ctx = ctx if ctx else _context.context()
  # --- attribute validation / canonicalization ---
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv2d_backprop_filter' Op, not %r." % strides)
  strides = [_execute.make_int(s, "strides") for s in strides]
  padding = _execute.make_str(padding, "padding")
  use_cudnn_on_gpu = _execute.make_bool(
      True if use_cudnn_on_gpu is None else use_cudnn_on_gpu,
      "use_cudnn_on_gpu")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  elif not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv2d_backprop_filter' Op, not %r." % dilations)
  dilations = [_execute.make_int(d, "dilations") for d in dilations]
  # --- tensor inputs: input/out_backprop share dtype T; filter_sizes is
  # always int32 ---
  attr_t, (input, out_backprop) = _execute.args_to_matching_eager(
      [input, out_backprop], eager_ctx)
  filter_sizes = _ops.convert_to_tensor(filter_sizes, _dtypes.int32)
  flat_inputs = [input, filter_sizes, out_backprop]
  op_attrs = ("T", attr_t, "strides", strides, "use_cudnn_on_gpu",
              use_cudnn_on_gpu, "padding", padding, "data_format",
              data_format, "dilations", dilations)
  outputs = _execute.execute(b"Conv2DBackpropFilter", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Conv2DBackpropFilter", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
1164 
1165 
@tf_export('nn.conv2d_backprop_input')
def conv2d_backprop_input(input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu=True, data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  r"""Computes the gradients of convolution with respect to the input.

  Args:
    input_sizes: A `Tensor` of type `int32`.
      An integer vector representing the shape of `input`,
      where `input` is a 4-D `[batch, height, width, channels]` tensor.
    filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape
      `[filter_height, filter_width, in_channels, out_channels]`.
    out_backprop: A `Tensor`. Must have the same type as `filter`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      of the convolution. Must be in the same order as the dimension specified with
      format.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4.  The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each filter
      element on that dimension. The dimension order is determined by the value of
      `data_format`, see above for details. Dilations in the batch and depth
      dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `filter`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): validate and canonicalize attrs,
  # then register a Conv2DBackpropInput node via the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'conv2d_backprop_input' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if use_cudnn_on_gpu is None:
      use_cudnn_on_gpu = True
    use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if dilations is None:
      dilations = [1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
      raise TypeError(
          "Expected list for 'dilations' argument to "
          "'conv2d_backprop_input' Op, not %r." % dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    _, _, _op = _op_def_lib._apply_op_helper(
        "Conv2DBackpropInput", input_sizes=input_sizes, filter=filter,
        out_backprop=out_backprop, strides=strides, padding=padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format,
        dilations=dilations, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are re-read from the created op so the recorded gradient sees
    # the op's canonical attribute values.
    _attrs = ("T", _op.get_attr("T"), "strides", _op.get_attr("strides"),
              "use_cudnn_on_gpu", _op.get_attr("use_cudnn_on_gpu"), "padding",
              _op.get_attr("padding"), "data_format",
              _op.get_attr("data_format"), "dilations",
              _op.get_attr("dilations"))
    _execute.record_gradient(
      "Conv2DBackpropInput", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; it raises _FallbackException
    # when it cannot handle these inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "Conv2DBackpropInput", name, _ctx._post_execution_callbacks,
        input_sizes, filter, out_backprop, "strides", strides,
        "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", padding,
        "data_format", data_format, "dilations", dilations)
      return _result
    except _core._FallbackException:
      return conv2d_backprop_input_eager_fallback(
          input_sizes, filter, out_backprop, strides=strides,
          use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding,
          data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface status errors as Python exceptions, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1262 
1263 
def conv2d_backprop_input_eager_fallback(input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu=True, data_format="NHWC", dilations=[1, 1, 1, 1], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv2d_backprop_input
  """
  eager_ctx = ctx if ctx else _context.context()
  # --- attribute validation / canonicalization ---
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv2d_backprop_input' Op, not %r." % strides)
  strides = [_execute.make_int(s, "strides") for s in strides]
  padding = _execute.make_str(padding, "padding")
  use_cudnn_on_gpu = _execute.make_bool(
      True if use_cudnn_on_gpu is None else use_cudnn_on_gpu,
      "use_cudnn_on_gpu")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  elif not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv2d_backprop_input' Op, not %r." % dilations)
  dilations = [_execute.make_int(d, "dilations") for d in dilations]
  # --- tensor inputs: filter/out_backprop share dtype T; input_sizes is
  # always int32 ---
  attr_t, (filter, out_backprop) = _execute.args_to_matching_eager(
      [filter, out_backprop], eager_ctx)
  input_sizes = _ops.convert_to_tensor(input_sizes, _dtypes.int32)
  flat_inputs = [input_sizes, filter, out_backprop]
  op_attrs = ("T", attr_t, "strides", strides, "use_cudnn_on_gpu",
              use_cudnn_on_gpu, "padding", padding, "data_format",
              data_format, "dilations", dilations)
  outputs = _execute.execute(b"Conv2DBackpropInput", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Conv2DBackpropInput", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
1301 
1302 
@tf_export('nn.conv3d')
def conv3d(input, filter, strides, padding, data_format="NDHWC", dilations=None, name=None):
  r"""Computes a 3-D convolution given 5-D `input` and `filter` tensors.

  In signal processing, cross-correlation is a measure of similarity of
  two waveforms as a function of a time-lag applied to one of them. This
  is also known as a sliding dot product or sliding inner-product.

  Our Conv3D implements a form of cross-correlation.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      Shape `[batch, in_depth, in_height, in_width, in_channels]`.
    filter: A `Tensor`. Must have the same type as `input`.
      Shape `[filter_depth, filter_height, filter_width, in_channels,
      out_channels]`. `in_channels` must match between `input` and `filter`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
      1-D tensor of length 5.  The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by the
      value of `data_format`, see above for details. Dilations in the batch and
      depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # None is the default sentinel for `dilations` (instead of a shared
  # mutable list default).  Normalize up front so both the graph path and
  # the eager fast path receive the same concrete list the old default
  # argument supplied.
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: canonicalize attrs, build the op, and record the gradient
    # against the attrs realized on the created op.
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'conv3d' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NDHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if dilations is None:
      dilations = [1, 1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
      raise TypeError(
          "Expected list for 'dilations' argument to "
          "'conv3d' Op, not %r." % dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    _, _, _op = _op_def_lib._apply_op_helper(
        "Conv3D", input=input, filter=filter, strides=strides,
        padding=padding, data_format=data_format, dilations=dilations,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "strides", _op.get_attr("strides"),
              "padding", _op.get_attr("padding"), "data_format",
              _op.get_attr("data_format"), "dilations",
              _op.get_attr("dilations"))
    _execute.record_gradient(
      "Conv3D", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python
    # slow path on _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Conv3D", name,
        _ctx._post_execution_callbacks, input, filter, "strides", strides,
        "padding", padding, "data_format", data_format, "dilations",
        dilations)
      return _result
    except _core._FallbackException:
      return conv3d_eager_fallback(
          input, filter, strides=strides, padding=padding,
          data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1392 
1393 
def conv3d_eager_fallback(input, filter, strides, padding, data_format="NDHWC", dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv3d

  Executes Conv3D through _execute.execute after validating and
  canonicalizing every attr.  `dilations` uses None as its default
  sentinel (normalized to [1, 1, 1, 1, 1] below) to avoid a shared
  mutable list default argument; callers observe identical behavior.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # input and filter must share a dtype T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], _ctx)
  (input, filter) = _inputs_T
  _inputs_flat = [input, filter]
  _attrs = ("T", _attr_T, "strides", strides, "padding", padding,
  "data_format", data_format, "dilations", dilations)
  _result = _execute.execute(b"Conv3D", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Conv3D", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
1426 
1427 
def conv3d_backprop_filter(input, filter, out_backprop, strides, padding, dilations=None, name=None):
  r"""Computes the gradients of 3-D convolution with respect to the filter.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
      Shape `[batch, depth, rows, cols, in_channels]`.
    filter: A `Tensor`. Must have the same type as `input`.
      Shape `[depth, rows, cols, in_channels, out_channels]`.
      `in_channels` must match between `input` and `filter`.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
      out_channels]`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # None is the default sentinel for `dilations` (instead of a shared
  # mutable list default).  Normalize up front so both the graph path and
  # the eager fast path receive the same concrete list the old default
  # argument supplied.
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: canonicalize attrs, build the op, and record the gradient
    # against the attrs realized on the created op.
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'conv3d_backprop_filter' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if dilations is None:
      dilations = [1, 1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
      raise TypeError(
          "Expected list for 'dilations' argument to "
          "'conv3d_backprop_filter' Op, not %r." % dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    _, _, _op = _op_def_lib._apply_op_helper(
        "Conv3DBackpropFilter", input=input, filter=filter,
        out_backprop=out_backprop, strides=strides, padding=padding,
        dilations=dilations, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "strides", _op.get_attr("strides"),
              "padding", _op.get_attr("padding"), "dilations",
              _op.get_attr("dilations"))
    _execute.record_gradient(
      "Conv3DBackpropFilter", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python
    # slow path on _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "Conv3DBackpropFilter", name, _ctx._post_execution_callbacks, input,
        filter, out_backprop, "strides", strides, "padding", padding,
        "dilations", dilations)
      return _result
    except _core._FallbackException:
      return conv3d_backprop_filter_eager_fallback(
          input, filter, out_backprop, strides=strides, padding=padding,
          dilations=dilations, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1498 
1499 
def conv3d_backprop_filter_eager_fallback(input, filter, out_backprop, strides, padding, dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv3d_backprop_filter

  Executes Conv3DBackpropFilter through _execute.execute after validating
  and canonicalizing every attr.  `dilations` uses None as its default
  sentinel (normalized to [1, 1, 1, 1, 1] below) to avoid a shared
  mutable list default argument; callers observe identical behavior.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d_backprop_filter' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d_backprop_filter' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # All three tensor inputs must share a dtype T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter, out_backprop], _ctx)
  (input, filter, out_backprop) = _inputs_T
  _inputs_flat = [input, filter, out_backprop]
  _attrs = ("T", _attr_T, "strides", strides, "padding", padding, "dilations",
  dilations)
  _result = _execute.execute(b"Conv3DBackpropFilter", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Conv3DBackpropFilter", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
1529 
1530 
@tf_export('nn.conv3d_backprop_filter_v2')
def conv3d_backprop_filter_v2(input, filter_sizes, out_backprop, strides, padding, data_format="NDHWC", dilations=None, name=None):
  r"""Computes the gradients of 3-D convolution with respect to the filter.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      Shape `[batch, depth, rows, cols, in_channels]`.
    filter_sizes: A `Tensor` of type `int32`.
      An integer vector representing the tensor shape of `filter`,
      where `filter` is a 5-D
      `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
      tensor.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
      out_channels]`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
      1-D tensor of length 5.  The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by the
      value of `data_format`, see above for details. Dilations in the batch and
      depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # None is the default sentinel for `dilations` (instead of a shared
  # mutable list default).  Normalize up front so both the graph path and
  # the eager fast path receive the same concrete list the old default
  # argument supplied.
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: canonicalize attrs, build the op, and record the gradient
    # against the attrs realized on the created op.
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'conv3d_backprop_filter_v2' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NDHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if dilations is None:
      dilations = [1, 1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
      raise TypeError(
          "Expected list for 'dilations' argument to "
          "'conv3d_backprop_filter_v2' Op, not %r." % dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    _, _, _op = _op_def_lib._apply_op_helper(
        "Conv3DBackpropFilterV2", input=input, filter_sizes=filter_sizes,
        out_backprop=out_backprop, strides=strides, padding=padding,
        data_format=data_format, dilations=dilations, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "strides", _op.get_attr("strides"),
              "padding", _op.get_attr("padding"), "data_format",
              _op.get_attr("data_format"), "dilations",
              _op.get_attr("dilations"))
    _execute.record_gradient(
      "Conv3DBackpropFilterV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python
    # slow path on _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "Conv3DBackpropFilterV2", name, _ctx._post_execution_callbacks, input,
        filter_sizes, out_backprop, "strides", strides, "padding", padding,
        "data_format", data_format, "dilations", dilations)
      return _result
    except _core._FallbackException:
      return conv3d_backprop_filter_v2_eager_fallback(
          input, filter_sizes, out_backprop, strides=strides, padding=padding,
          data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1619 
1620 
def conv3d_backprop_filter_v2_eager_fallback(input, filter_sizes, out_backprop, strides, padding, data_format="NDHWC", dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv3d_backprop_filter_v2

  Executes Conv3DBackpropFilterV2 through _execute.execute after
  validating and canonicalizing every attr.  `dilations` uses None as its
  default sentinel (normalized to [1, 1, 1, 1, 1] below) to avoid a
  shared mutable list default argument; callers observe identical
  behavior.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d_backprop_filter_v2' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d_backprop_filter_v2' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # input and out_backprop must share a dtype T; filter_sizes is always int32.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, out_backprop], _ctx)
  (input, out_backprop) = _inputs_T
  filter_sizes = _ops.convert_to_tensor(filter_sizes, _dtypes.int32)
  _inputs_flat = [input, filter_sizes, out_backprop]
  _attrs = ("T", _attr_T, "strides", strides, "padding", padding,
  "data_format", data_format, "dilations", dilations)
  _result = _execute.execute(b"Conv3DBackpropFilterV2", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "Conv3DBackpropFilterV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
1655 
1656 
def conv3d_backprop_input(input, filter, out_backprop, strides, padding, dilations=None, name=None):
  r"""Computes the gradients of 3-D convolution with respect to the input.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
      Shape `[batch, depth, rows, cols, in_channels]`.
    filter: A `Tensor`. Must have the same type as `input`.
      Shape `[depth, rows, cols, in_channels, out_channels]`.
      `in_channels` must match between `input` and `filter`.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
      out_channels]`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # None is the default sentinel for `dilations` (instead of a shared
  # mutable list default).  Normalize up front so both the graph path and
  # the eager fast path receive the same concrete list the old default
  # argument supplied.
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: canonicalize attrs, build the op, and record the gradient
    # against the attrs realized on the created op.
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'conv3d_backprop_input' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if dilations is None:
      dilations = [1, 1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
      raise TypeError(
          "Expected list for 'dilations' argument to "
          "'conv3d_backprop_input' Op, not %r." % dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    _, _, _op = _op_def_lib._apply_op_helper(
        "Conv3DBackpropInput", input=input, filter=filter,
        out_backprop=out_backprop, strides=strides, padding=padding,
        dilations=dilations, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "strides", _op.get_attr("strides"),
              "padding", _op.get_attr("padding"), "dilations",
              _op.get_attr("dilations"))
    _execute.record_gradient(
      "Conv3DBackpropInput", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python
    # slow path on _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "Conv3DBackpropInput", name, _ctx._post_execution_callbacks, input,
        filter, out_backprop, "strides", strides, "padding", padding,
        "dilations", dilations)
      return _result
    except _core._FallbackException:
      return conv3d_backprop_input_eager_fallback(
          input, filter, out_backprop, strides=strides, padding=padding,
          dilations=dilations, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1727 
1728 
def conv3d_backprop_input_eager_fallback(input, filter, out_backprop, strides, padding, dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv3d_backprop_input

  Executes Conv3DBackpropInput through _execute.execute after validating
  and canonicalizing every attr.  `dilations` uses None as its default
  sentinel (normalized to [1, 1, 1, 1, 1] below) to avoid a shared
  mutable list default argument; callers observe identical behavior.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d_backprop_input' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d_backprop_input' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # All three tensor inputs must share a dtype T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter, out_backprop], _ctx)
  (input, filter, out_backprop) = _inputs_T
  _inputs_flat = [input, filter, out_backprop]
  _attrs = ("T", _attr_T, "strides", strides, "padding", padding, "dilations",
  dilations)
  _result = _execute.execute(b"Conv3DBackpropInput", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Conv3DBackpropInput", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
1758 
1759 
def conv3d_backprop_input_v2(input_sizes, filter, out_backprop, strides, padding, data_format="NDHWC", dilations=None, name=None):
  r"""Computes the gradients of 3-D convolution with respect to the input.

  Args:
    input_sizes: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      An integer vector representing the tensor shape of `input`,
      where `input` is a 5-D
      `[batch, depth, rows, cols, in_channels]` tensor.
    filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      Shape `[depth, rows, cols, in_channels, out_channels]`.
      `in_channels` must match between `input` and `filter`.
    out_backprop: A `Tensor`. Must have the same type as `filter`.
      Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
      out_channels]`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
      1-D tensor of length 5.  The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by the
      value of `data_format`, see above for details. Dilations in the batch and
      depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `filter`.
  """
  # None is the default sentinel for `dilations` (instead of a shared
  # mutable list default).  Normalize up front so both the graph path and
  # the eager fast path receive the same concrete list the old default
  # argument supplied.
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: canonicalize attrs, build the op, and record the gradient
    # against the attrs realized on the created op (including the inferred
    # Tshape of input_sizes).
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'conv3d_backprop_input_v2' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NDHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if dilations is None:
      dilations = [1, 1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
      raise TypeError(
          "Expected list for 'dilations' argument to "
          "'conv3d_backprop_input_v2' Op, not %r." % dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    _, _, _op = _op_def_lib._apply_op_helper(
        "Conv3DBackpropInputV2", input_sizes=input_sizes, filter=filter,
        out_backprop=out_backprop, strides=strides, padding=padding,
        data_format=data_format, dilations=dilations, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "strides", _op.get_attr("strides"),
              "padding", _op.get_attr("padding"), "data_format",
              _op.get_attr("data_format"), "dilations",
              _op.get_attr("dilations"), "Tshape", _op.get_attr("Tshape"))
    _execute.record_gradient(
      "Conv3DBackpropInputV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python
    # slow path on _FallbackException.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "Conv3DBackpropInputV2", name, _ctx._post_execution_callbacks,
        input_sizes, filter, out_backprop, "strides", strides, "padding",
        padding, "data_format", data_format, "dilations", dilations)
      return _result
    except _core._FallbackException:
      return conv3d_backprop_input_v2_eager_fallback(
          input_sizes, filter, out_backprop, strides=strides, padding=padding,
          data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1847 
1848 
def conv3d_backprop_input_v2_eager_fallback(input_sizes, filter, out_backprop, strides, padding, data_format="NDHWC", dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv3d_backprop_input_v2

  Executes Conv3DBackpropInputV2 through _execute.execute after
  validating and canonicalizing every attr.  `dilations` uses None as its
  default sentinel (normalized to [1, 1, 1, 1, 1] below) to avoid a
  shared mutable list default argument; callers observe identical
  behavior.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d_backprop_input_v2' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d_backprop_input_v2' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # filter and out_backprop must share a dtype T; input_sizes may be
  # int32 or int64 and determines the Tshape attr (int32 preferred).
  _attr_T, _inputs_T = _execute.args_to_matching_eager([filter, out_backprop], _ctx)
  (filter, out_backprop) = _inputs_T
  _attr_Tshape, (input_sizes,) = _execute.args_to_matching_eager([input_sizes], _ctx, _dtypes.int32)
  _inputs_flat = [input_sizes, filter, out_backprop]
  _attrs = ("T", _attr_T, "strides", strides, "padding", padding,
  "data_format", data_format, "dilations", dilations, "Tshape", _attr_Tshape)
  _result = _execute.execute(b"Conv3DBackpropInputV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Conv3DBackpropInputV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
1882 
1883 
def data_format_dim_map(x, src_format="NHWC", dst_format="NCHW", name=None):
  r"""Returns the dimension index in the destination data format given the one
  in the source data format.

  Args:
    x: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A Tensor with each element as a dimension index in source data format.
      Must be in the range [-4, 4).
    src_format: An optional `string`. Defaults to `"NHWC"`.
      source data format.
    dst_format: An optional `string`. Defaults to `"NCHW"`.
      destination data format.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager mode: try the fast C path first, dropping to the Python
    # slow path only when the fast path cannot handle the inputs.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "DataFormatDimMap", name, _ctx._post_execution_callbacks, x,
          "src_format", src_format, "dst_format", dst_format)
    except _core._FallbackException:
      return data_format_dim_map_eager_fallback(
          x, src_format=src_format, dst_format=dst_format, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as exc:
      msg = exc.message if name is None else exc.message + " name: " + name
      _six.raise_from(_core._status_to_exception(exc.code, msg), None)

  # Graph mode: build the op, record the gradient, and return its output.
  src_format = _execute.make_str(
      "NHWC" if src_format is None else src_format, "src_format")
  dst_format = _execute.make_str(
      "NCHW" if dst_format is None else dst_format, "dst_format")
  _, _, op = _op_def_lib._apply_op_helper(
      "DataFormatDimMap", x=x, src_format=src_format, dst_format=dst_format,
      name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "src_format", op.get_attr("src_format"),
           "dst_format", op.get_attr("dst_format"))
  _execute.record_gradient(
    "DataFormatDimMap", op.inputs, attrs, outputs, name)
  output, = outputs
  return output
1940 
1941 
def data_format_dim_map_eager_fallback(x, src_format="NHWC", dst_format="NCHW", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function data_format_dim_map
  """
  eager_ctx = ctx if ctx else _context.context()
  # Normalize the string attrs, substituting defaults for None.
  src_format = _execute.make_str(
      "NHWC" if src_format is None else src_format, "src_format")
  dst_format = _execute.make_str(
      "NCHW" if dst_format is None else dst_format, "dst_format")
  # x determines attr T; int32 is the preferred dtype when unspecified.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], eager_ctx, _dtypes.int32)
  flat_inputs = [x]
  attrs = ("T", _attr_T, "src_format", src_format, "dst_format", dst_format)
  results = _execute.execute(b"DataFormatDimMap", 1, inputs=flat_inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "DataFormatDimMap", flat_inputs, attrs, results, name)
  result, = results
  return result
1962 
1963 
def data_format_vec_permute(x, src_format="NHWC", dst_format="NCHW", name=None):
  r"""Returns the permuted vector/tensor in the destination data format given
  the one in the source data format.

  Args:
    x: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Vector of size 4 or Tensor of shape (4, 2) in source data format.
    src_format: An optional `string`. Defaults to `"NHWC"`.
      source data format.
    dst_format: An optional `string`. Defaults to `"NCHW"`.
      destination data format.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager mode: attempt the fast C path; fall back to the Python
    # slow path when the fast path raises _FallbackException.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "DataFormatVecPermute", name, _ctx._post_execution_callbacks, x,
          "src_format", src_format, "dst_format", dst_format)
    except _core._FallbackException:
      return data_format_vec_permute_eager_fallback(
          x, src_format=src_format, dst_format=dst_format, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as exc:
      msg = exc.message if name is None else exc.message + " name: " + name
      _six.raise_from(_core._status_to_exception(exc.code, msg), None)

  # Graph mode: construct the op, record the gradient, return its output.
  src_format = _execute.make_str(
      "NHWC" if src_format is None else src_format, "src_format")
  dst_format = _execute.make_str(
      "NCHW" if dst_format is None else dst_format, "dst_format")
  _, _, op = _op_def_lib._apply_op_helper(
      "DataFormatVecPermute", x=x, src_format=src_format,
      dst_format=dst_format, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "src_format", op.get_attr("src_format"),
           "dst_format", op.get_attr("dst_format"))
  _execute.record_gradient(
    "DataFormatVecPermute", op.inputs, attrs, outputs, name)
  output, = outputs
  return output
2019 
2020 
def data_format_vec_permute_eager_fallback(x, src_format="NHWC", dst_format="NCHW", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function data_format_vec_permute
  """
  eager_ctx = ctx if ctx else _context.context()
  # Normalize the string attrs, substituting defaults for None.
  src_format = _execute.make_str(
      "NHWC" if src_format is None else src_format, "src_format")
  dst_format = _execute.make_str(
      "NCHW" if dst_format is None else dst_format, "dst_format")
  # x determines attr T; int32 is the preferred dtype when unspecified.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], eager_ctx, _dtypes.int32)
  flat_inputs = [x]
  attrs = ("T", _attr_T, "src_format", src_format, "dst_format", dst_format)
  results = _execute.execute(b"DataFormatVecPermute", 1, inputs=flat_inputs,
                             attrs=attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "DataFormatVecPermute", flat_inputs, attrs, results, name)
  result, = results
  return result
2041 
2042 
@tf_export('nn.depthwise_conv2d_native')
def depthwise_conv2d_native(input, filter, strides, padding, data_format="NHWC", dilations=None, name=None):
  r"""Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.

  Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
  and a filter / kernel tensor of shape
  `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
  `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
  a different filter to each input channel (expanding from 1 channel to
  `channel_multiplier` channels for each), then concatenates the results
  together. Thus, the output has `in_channels * channel_multiplier` channels.

  ```
  for k in 0..in_channels-1
    for q in 0..channel_multiplier-1
      output[b, i, j, k * channel_multiplier + q] =
        sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
                          filter[di, dj, k, q]
  ```

  Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
    filter: A `Tensor`. Must have the same type as `input`.
    strides: A list of `ints`.
      1-D of length 4.  The stride of the sliding window for each dimension
      of `input`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, channels, height, width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4.  The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each filter
      element on that dimension. The dimension order is determined by the value of
      `data_format`, see above for details. Dilations in the batch and depth
      dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate attrs and build the op via the op-def library.
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'depthwise_conv2d_native' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if dilations is None:
      # None stands in for the documented default; avoids a mutable
      # list literal in the signature.
      dilations = [1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
      raise TypeError(
          "Expected list for 'dilations' argument to "
          "'depthwise_conv2d_native' Op, not %r." % dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    _, _, _op = _op_def_lib._apply_op_helper(
        "DepthwiseConv2dNative", input=input, filter=filter, strides=strides,
        padding=padding, data_format=data_format, dilations=dilations,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "strides", _op.get_attr("strides"),
              "padding", _op.get_attr("padding"), "data_format",
              _op.get_attr("data_format"), "dilations",
              _op.get_attr("dilations"))
    _execute.record_gradient(
      "DepthwiseConv2dNative", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the fast C path; fall back to the Python slow path.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "DepthwiseConv2dNative", name, _ctx._post_execution_callbacks, input,
        filter, "strides", strides, "padding", padding, "data_format",
        data_format, "dilations", dilations)
      return _result
    except _core._FallbackException:
      return depthwise_conv2d_native_eager_fallback(
          input, filter, strides=strides, padding=padding,
          data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2142 
2143 
def depthwise_conv2d_native_eager_fallback(input, filter, strides, padding, data_format="NHWC", dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function depthwise_conv2d_native

  A `dilations` of `None` is interpreted as `[1, 1, 1, 1]`; `None` is used
  as the default instead of a mutable list literal.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'depthwise_conv2d_native' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'depthwise_conv2d_native' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # input and filter must agree on dtype T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], _ctx)
  (input, filter) = _inputs_T
  _inputs_flat = [input, filter]
  _attrs = ("T", _attr_T, "strides", strides, "padding", padding,
  "data_format", data_format, "dilations", dilations)
  _result = _execute.execute(b"DepthwiseConv2dNative", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "DepthwiseConv2dNative", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
2176 
2177 
@tf_export('nn.depthwise_conv2d_native_backprop_filter')
def depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop, strides, padding, data_format="NHWC", dilations=None, name=None):
  r"""Computes the gradients of depthwise convolution with respect to the filter.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape based on `data_format`.  For example, if
      `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
      in_width, in_channels]` tensor.
    filter_sizes: A `Tensor` of type `int32`.
      An integer vector representing the tensor shape of `filter`,
      where `filter` is a 4-D
      `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      4-D with shape  based on `data_format`.
      For example, if `data_format` is 'NHWC' then
      out_backprop shape is `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      of the convolution.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, channels, height, width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4.  The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each filter
      element on that dimension. The dimension order is determined by the value of
      `data_format`, see above for details. Dilations in the batch and depth
      dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate attrs and build the op via the op-def library.
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'depthwise_conv2d_native_backprop_filter' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if dilations is None:
      # None stands in for the documented default; avoids a mutable
      # list literal in the signature.
      dilations = [1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
      raise TypeError(
          "Expected list for 'dilations' argument to "
          "'depthwise_conv2d_native_backprop_filter' Op, not %r." % dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    _, _, _op = _op_def_lib._apply_op_helper(
        "DepthwiseConv2dNativeBackpropFilter", input=input,
        filter_sizes=filter_sizes, out_backprop=out_backprop, strides=strides,
        padding=padding, data_format=data_format, dilations=dilations,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "strides", _op.get_attr("strides"),
              "padding", _op.get_attr("padding"), "data_format",
              _op.get_attr("data_format"), "dilations",
              _op.get_attr("dilations"))
    _execute.record_gradient(
      "DepthwiseConv2dNativeBackpropFilter", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the fast C path; fall back to the Python slow path.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "DepthwiseConv2dNativeBackpropFilter", name,
        _ctx._post_execution_callbacks, input, filter_sizes, out_backprop,
        "strides", strides, "padding", padding, "data_format", data_format,
        "dilations", dilations)
      return _result
    except _core._FallbackException:
      return depthwise_conv2d_native_backprop_filter_eager_fallback(
          input, filter_sizes, out_backprop, strides=strides, padding=padding,
          data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2271 
2272 
def depthwise_conv2d_native_backprop_filter_eager_fallback(input, filter_sizes, out_backprop, strides, padding, data_format="NHWC", dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function depthwise_conv2d_native_backprop_filter

  A `dilations` of `None` is interpreted as `[1, 1, 1, 1]`; `None` is used
  as the default instead of a mutable list literal.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'depthwise_conv2d_native_backprop_filter' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'depthwise_conv2d_native_backprop_filter' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # input and out_backprop must agree on dtype T; filter_sizes is int32.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, out_backprop], _ctx)
  (input, out_backprop) = _inputs_T
  filter_sizes = _ops.convert_to_tensor(filter_sizes, _dtypes.int32)
  _inputs_flat = [input, filter_sizes, out_backprop]
  _attrs = ("T", _attr_T, "strides", strides, "padding", padding,
  "data_format", data_format, "dilations", dilations)
  _result = _execute.execute(b"DepthwiseConv2dNativeBackpropFilter", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "DepthwiseConv2dNativeBackpropFilter", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
2307 
2308 
@tf_export('nn.depthwise_conv2d_native_backprop_input')
def depthwise_conv2d_native_backprop_input(input_sizes, filter, out_backprop, strides, padding, data_format="NHWC", dilations=None, name=None):
  r"""Computes the gradients of depthwise convolution with respect to the input.

  Args:
    input_sizes: A `Tensor` of type `int32`.
      An integer vector representing the shape of `input`, based
      on `data_format`.  For example, if `data_format` is 'NHWC' then
       `input` is a 4-D `[batch, height, width, channels]` tensor.
    filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape
      `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
    out_backprop: A `Tensor`. Must have the same type as `filter`.
      4-D with shape  based on `data_format`.
      For example, if `data_format` is 'NHWC' then
      out_backprop shape is `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      of the convolution.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, channels, height, width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4.  The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each filter
      element on that dimension. The dimension order is determined by the value of
      `data_format`, see above for details. Dilations in the batch and depth
      dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `filter`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate attrs and build the op via the op-def library.
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'depthwise_conv2d_native_backprop_input' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if dilations is None:
      # None stands in for the documented default; avoids a mutable
      # list literal in the signature.
      dilations = [1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
      raise TypeError(
          "Expected list for 'dilations' argument to "
          "'depthwise_conv2d_native_backprop_input' Op, not %r." % dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    _, _, _op = _op_def_lib._apply_op_helper(
        "DepthwiseConv2dNativeBackpropInput", input_sizes=input_sizes,
        filter=filter, out_backprop=out_backprop, strides=strides,
        padding=padding, data_format=data_format, dilations=dilations,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "strides", _op.get_attr("strides"),
              "padding", _op.get_attr("padding"), "data_format",
              _op.get_attr("data_format"), "dilations",
              _op.get_attr("dilations"))
    _execute.record_gradient(
      "DepthwiseConv2dNativeBackpropInput", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the fast C path; fall back to the Python slow path.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "DepthwiseConv2dNativeBackpropInput", name,
        _ctx._post_execution_callbacks, input_sizes, filter, out_backprop,
        "strides", strides, "padding", padding, "data_format", data_format,
        "dilations", dilations)
      return _result
    except _core._FallbackException:
      return depthwise_conv2d_native_backprop_input_eager_fallback(
          input_sizes, filter, out_backprop, strides=strides, padding=padding,
          data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2401 
2402 
def depthwise_conv2d_native_backprop_input_eager_fallback(input_sizes, filter, out_backprop, strides, padding, data_format="NHWC", dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function depthwise_conv2d_native_backprop_input

  A `dilations` of `None` is interpreted as `[1, 1, 1, 1]`; `None` is used
  as the default instead of a mutable list literal.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'depthwise_conv2d_native_backprop_input' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'depthwise_conv2d_native_backprop_input' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # filter and out_backprop must agree on dtype T; input_sizes is int32.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([filter, out_backprop], _ctx)
  (filter, out_backprop) = _inputs_T
  input_sizes = _ops.convert_to_tensor(input_sizes, _dtypes.int32)
  _inputs_flat = [input_sizes, filter, out_backprop]
  _attrs = ("T", _attr_T, "strides", strides, "padding", padding,
  "data_format", data_format, "dilations", dilations)
  _result = _execute.execute(b"DepthwiseConv2dNativeBackpropInput", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "DepthwiseConv2dNativeBackpropInput", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
2437 
2438 
@tf_export('nn.dilation2d')
def dilation2d(input, filter, strides, rates, padding, name=None):
  r"""Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.

  The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
  `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
  input channel is processed independently of the others with its own structuring
  function. The `output` tensor has shape
  `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
  tensor depend on the `padding` algorithm. We currently only support the default
  "NHWC" `data_format`.

  In detail, the grayscale morphological 2-D dilation is the max-sum correlation
  (for consistency with `conv2d`, we use unmirrored filters):

      output[b, y, x, c] =
         max_{dy, dx} input[b,
                            strides[1] * y + rates[1] * dy,
                            strides[2] * x + rates[2] * dx,
                            c] +
                      filter[dy, dx, c]

  Max-pooling is a special case when the filter has size equal to the pooling
  kernel size and contains all zeros.

  Note on duality: The dilation of `input` by the `filter` is equal to the
  negation of the erosion of `-input` by the reflected `filter`.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      4-D with shape `[batch, in_height, in_width, depth]`.
    filter: A `Tensor`. Must have the same type as `input`.
      3-D with shape `[filter_height, filter_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the input
      tensor. Must be: `[1, stride_height, stride_width, 1]`.
    rates: A list of `ints` that has length `>= 4`.
      The input stride for atrous morphological dilation. Must be:
      `[1, rate_height, rate_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is not None and _ctx._eager_context.is_eager:
    # Eager mode: try the fast C path; fall back to the Python slow path.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name, "Dilation2D",
          name, _ctx._post_execution_callbacks, input, filter, "strides",
          strides, "rates", rates, "padding", padding)
    except _core._FallbackException:
      return dilation2d_eager_fallback(
          input, filter, strides=strides, rates=rates, padding=padding,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as exc:
      msg = exc.message if name is None else exc.message + " name: " + name
      _six.raise_from(_core._status_to_exception(exc.code, msg), None)

  # Graph mode: validate attrs, build the op, and record the gradient.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'dilation2d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  if not isinstance(rates, (list, tuple)):
    raise TypeError(
        "Expected list for 'rates' argument to "
        "'dilation2d' Op, not %r." % rates)
  rates = [_execute.make_int(_i, "rates") for _i in rates]
  padding = _execute.make_str(padding, "padding")
  _, _, op = _op_def_lib._apply_op_helper(
      "Dilation2D", input=input, filter=filter, strides=strides,
      rates=rates, padding=padding, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "strides", op.get_attr("strides"),
           "rates", op.get_attr("rates"), "padding", op.get_attr("padding"))
  _execute.record_gradient(
    "Dilation2D", op.inputs, attrs, outputs, name)
  output, = outputs
  return output
2528 
2529 
def dilation2d_eager_fallback(input, filter, strides, rates, padding, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function dilation2d
  """
  # Reuse the caller-supplied context when present, else the thread-local one.
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the attribute lists before dispatch.
  if isinstance(strides, (list, tuple)):
    strides = [_execute.make_int(_s, "strides") for _s in strides]
  else:
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'dilation2d' Op, not %r." % strides)
  if isinstance(rates, (list, tuple)):
    rates = [_execute.make_int(_r, "rates") for _r in rates]
  else:
    raise TypeError(
        "Expected list for 'rates' argument to "
        "'dilation2d' Op, not %r." % rates)
  padding = _execute.make_str(padding, "padding")
  # Both tensor inputs must agree on the single dtype attribute T.
  _attr_T, (input, filter) = _execute.args_to_matching_eager(
      [input, filter], _ctx)
  _inputs_flat = [input, filter]
  _attrs = ("T", _attr_T, "strides", strides, "rates", rates,
            "padding", padding)
  _result = _execute.execute(b"Dilation2D", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Dilation2D", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result
2557 
2558 
def dilation2d_backprop_filter(input, filter, out_backprop, strides, rates, padding, name=None):
  r"""Computes the gradient of morphological 2-D dilation with respect to the filter.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      4-D with shape `[batch, in_height, in_width, depth]`.
    filter: A `Tensor`. Must have the same type as `input`.
      3-D with shape `[filter_height, filter_width, depth]`.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, out_height, out_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      1-D of length 4. The stride of the sliding window for each dimension of
      the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
    rates: A list of `ints` that has length `>= 4`.
      1-D of length 4. The input stride for atrous morphological dilation.
      Must be: `[1, rate_height, rate_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (or no initialized eager context): build a graph node via the
  # op-def library and record the gradient for backprop.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Attribute lists must be materialized as Python ints before op creation.
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'dilation2d_backprop_filter' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    if not isinstance(rates, (list, tuple)):
      raise TypeError(
          "Expected list for 'rates' argument to "
          "'dilation2d_backprop_filter' Op, not %r." % rates)
    rates = [_execute.make_int(_i, "rates") for _i in rates]
    padding = _execute.make_str(padding, "padding")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Dilation2DBackpropFilter", input=input, filter=filter,
        out_backprop=out_backprop, strides=strides, rates=rates,
        padding=padding, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Mirror the op's attrs from the created node so the gradient tape can
    # replay it.
    _attrs = ("T", _op.get_attr("T"), "strides", _op.get_attr("strides"),
              "rates", _op.get_attr("rates"), "padding",
              _op.get_attr("padding"))
    _execute.record_gradient(
      "Dilation2DBackpropFilter", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the result list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "Dilation2DBackpropFilter", name, _ctx._post_execution_callbacks,
        input, filter, out_backprop, "strides", strides, "rates", rates,
        "padding", padding)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return dilation2d_backprop_filter_eager_fallback(
          input, filter, out_backprop, strides=strides, rates=rates,
          padding=padding, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the raised error for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2627 
2628 
def dilation2d_backprop_filter_eager_fallback(input, filter, out_backprop, strides, rates, padding, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function dilation2d_backprop_filter
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the attribute lists before dispatch.
  if isinstance(strides, (list, tuple)):
    strides = [_execute.make_int(_s, "strides") for _s in strides]
  else:
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'dilation2d_backprop_filter' Op, not %r." % strides)
  if isinstance(rates, (list, tuple)):
    rates = [_execute.make_int(_r, "rates") for _r in rates]
  else:
    raise TypeError(
        "Expected list for 'rates' argument to "
        "'dilation2d_backprop_filter' Op, not %r." % rates)
  padding = _execute.make_str(padding, "padding")
  # All three tensor inputs share the single dtype attribute T.
  _attr_T, (input, filter, out_backprop) = _execute.args_to_matching_eager(
      [input, filter, out_backprop], _ctx)
  _inputs_flat = [input, filter, out_backprop]
  _attrs = ("T", _attr_T, "strides", strides, "rates", rates,
            "padding", padding)
  _result = _execute.execute(b"Dilation2DBackpropFilter", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "Dilation2DBackpropFilter", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result
2657 
2658 
def dilation2d_backprop_input(input, filter, out_backprop, strides, rates, padding, name=None):
  r"""Computes the gradient of morphological 2-D dilation with respect to the input.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      4-D with shape `[batch, in_height, in_width, depth]`.
    filter: A `Tensor`. Must have the same type as `input`.
      3-D with shape `[filter_height, filter_width, depth]`.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, out_height, out_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      1-D of length 4. The stride of the sliding window for each dimension of
      the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
    rates: A list of `ints` that has length `>= 4`.
      1-D of length 4. The input stride for atrous morphological dilation.
      Must be: `[1, rate_height, rate_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (or no initialized eager context): build a graph node via the
  # op-def library and record the gradient for backprop.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Attribute lists must be materialized as Python ints before op creation.
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'dilation2d_backprop_input' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    if not isinstance(rates, (list, tuple)):
      raise TypeError(
          "Expected list for 'rates' argument to "
          "'dilation2d_backprop_input' Op, not %r." % rates)
    rates = [_execute.make_int(_i, "rates") for _i in rates]
    padding = _execute.make_str(padding, "padding")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Dilation2DBackpropInput", input=input, filter=filter,
        out_backprop=out_backprop, strides=strides, rates=rates,
        padding=padding, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Mirror the op's attrs from the created node so the gradient tape can
    # replay it.
    _attrs = ("T", _op.get_attr("T"), "strides", _op.get_attr("strides"),
              "rates", _op.get_attr("rates"), "padding",
              _op.get_attr("padding"))
    _execute.record_gradient(
      "Dilation2DBackpropInput", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the result list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "Dilation2DBackpropInput", name, _ctx._post_execution_callbacks,
        input, filter, out_backprop, "strides", strides, "rates", rates,
        "padding", padding)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return dilation2d_backprop_input_eager_fallback(
          input, filter, out_backprop, strides=strides, rates=rates,
          padding=padding, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the raised error for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2727 
2728 
def dilation2d_backprop_input_eager_fallback(input, filter, out_backprop, strides, rates, padding, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function dilation2d_backprop_input
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the attribute lists before dispatch.
  if isinstance(strides, (list, tuple)):
    strides = [_execute.make_int(_s, "strides") for _s in strides]
  else:
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'dilation2d_backprop_input' Op, not %r." % strides)
  if isinstance(rates, (list, tuple)):
    rates = [_execute.make_int(_r, "rates") for _r in rates]
  else:
    raise TypeError(
        "Expected list for 'rates' argument to "
        "'dilation2d_backprop_input' Op, not %r." % rates)
  padding = _execute.make_str(padding, "padding")
  # All three tensor inputs share the single dtype attribute T.
  _attr_T, (input, filter, out_backprop) = _execute.args_to_matching_eager(
      [input, filter, out_backprop], _ctx)
  _inputs_flat = [input, filter, out_backprop]
  _attrs = ("T", _attr_T, "strides", strides, "rates", rates,
            "padding", padding)
  _result = _execute.execute(b"Dilation2DBackpropInput", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "Dilation2DBackpropInput", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result
2757 
2758 
@tf_export('nn.elu')
def elu(features, name=None):
  r"""Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise.

  See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
  ](http://arxiv.org/abs/1511.07289)

  Args:
    features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context
  # Graph mode (or no initialized eager context): add an Elu node to the
  # current graph and record the gradient for backprop.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Elu", features=features, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Elu", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the result list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Elu", name,
        _ctx._post_execution_callbacks, features)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return elu_eager_fallback(
          features, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the raised error for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2800 
2801 
def elu_eager_fallback(features, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function elu
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the dtype attribute T from the single tensor input.
  _attr_T, _matched = _execute.args_to_matching_eager([features], _ctx)
  features, = _matched
  _inputs_flat = [features]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Elu", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Elu", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result
2816 
2817 
def elu_grad(gradients, outputs, name=None):
  r"""Computes gradients for the exponential linear (Elu) operation.

  Args:
    gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      The backpropagated gradients to the corresponding Elu operation.
    outputs: A `Tensor`. Must have the same type as `gradients`.
      The outputs of the corresponding Elu operation.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `gradients`.
  """
  _ctx = _context._context
  # Graph mode (or no initialized eager context): add an EluGrad node to the
  # current graph and record the gradient for backprop.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "EluGrad", gradients=gradients, outputs=outputs, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "EluGrad", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the result list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "EluGrad",
        name, _ctx._post_execution_callbacks, gradients, outputs)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return elu_grad_eager_fallback(
          gradients, outputs, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the raised error for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2858 
2859 
def elu_grad_eager_fallback(gradients, outputs, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function elu_grad
  """
  _ctx = ctx if ctx else _context.context()
  # Both tensor inputs must be promoted to one common dtype T.
  _attr_T, (gradients, outputs) = _execute.args_to_matching_eager(
      [gradients, outputs], _ctx)
  _inputs_flat = [gradients, outputs]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"EluGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "EluGrad", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result
2875 
2876 
# Named output holder for the three-output FractionalAvgPool op, so callers
# can access results by field name instead of tuple position.
_fractional_avg_pool_outputs = ["output", "row_pooling_sequence",
                               "col_pooling_sequence"]
_FractionalAvgPoolOutput = _collections.namedtuple(
    "FractionalAvgPool", _fractional_avg_pool_outputs)
2881 
2882 
@tf_export('nn.fractional_avg_pool')
def fractional_avg_pool(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None):
  r"""Performs fractional average pooling on the input.

  Fractional average pooling is similar to Fractional max pooling in the pooling
  region generation step. The only difference is that after pooling regions are
  generated, a mean operation is performed instead of a max operation in each
  pooling region.

  Args:
    value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`.
      4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length `>= 4`.
      Pooling ratio for each dimension of `value`, currently only
      supports row and col dimension and should be >= 1.0. For example, a valid
      pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
      must be 1.0 because we don't allow pooling on batch and channels
      dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
      respectively.
    pseudo_random: An optional `bool`. Defaults to `False`.
      When set to True, generates the pooling sequence in a
      pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
      Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
      difference between pseudorandom and random.
    overlapping: An optional `bool`. Defaults to `False`.
      When set to True, it means when pooling, the values at the boundary
      of adjacent pooling cells are used by both cells. For example:

      `index  0  1  2  3  4`

      `value  20 5  16 3  7`

      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
      The result would be [41/3, 26/3] for fractional avg pooling.
    deterministic: An optional `bool`. Defaults to `False`.
      When set to True, a fixed pooling region will be used when
      iterating over a FractionalAvgPool node in the computation graph. Mainly used
      in unit test to make FractionalAvgPool deterministic.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      An second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, row_pooling_sequence, col_pooling_sequence).

    output: A `Tensor`. Has the same type as `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.
  """
  _ctx = _context._context
  # Graph mode (or no initialized eager context): validate/normalize the
  # attrs, then build a graph node via the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(pooling_ratio, (list, tuple)):
      raise TypeError(
          "Expected list for 'pooling_ratio' argument to "
          "'fractional_avg_pool' Op, not %r." % pooling_ratio)
    pooling_ratio = [_execute.make_float(_f, "pooling_ratio") for _f in pooling_ratio]
    # Optional attrs: substitute defaults for None, then canonicalize types.
    if pseudo_random is None:
      pseudo_random = False
    pseudo_random = _execute.make_bool(pseudo_random, "pseudo_random")
    if overlapping is None:
      overlapping = False
    overlapping = _execute.make_bool(overlapping, "overlapping")
    if deterministic is None:
      deterministic = False
    deterministic = _execute.make_bool(deterministic, "deterministic")
    if seed is None:
      seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
      seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FractionalAvgPool", value=value, pooling_ratio=pooling_ratio,
        pseudo_random=pseudo_random, overlapping=overlapping,
        deterministic=deterministic, seed=seed, seed2=seed2, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Mirror the op's attrs from the created node so the gradient tape can
    # replay it.
    _attrs = ("pooling_ratio", _op.get_attr("pooling_ratio"), "pseudo_random",
              _op.get_attr("pseudo_random"), "overlapping",
              _op.get_attr("overlapping"), "deterministic",
              _op.get_attr("deterministic"), "seed", _op.get_attr("seed"),
              "seed2", _op.get_attr("seed2"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "FractionalAvgPool", _inputs_flat, _attrs, _result, name)
    # Wrap the three outputs in the named tuple.
    _result = _FractionalAvgPoolOutput._make(_result)
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FractionalAvgPool", name, _ctx._post_execution_callbacks, value,
        "pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random,
        "overlapping", overlapping, "deterministic", deterministic, "seed",
        seed, "seed2", seed2)
      _result = _FractionalAvgPoolOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return fractional_avg_pool_eager_fallback(
          value, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random,
          overlapping=overlapping, deterministic=deterministic, seed=seed,
          seed2=seed2, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the raised error for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2995 
2996 
def fractional_avg_pool_eager_fallback(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fractional_avg_pool
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the pooling-ratio list before dispatch.
  if isinstance(pooling_ratio, (list, tuple)):
    pooling_ratio = [_execute.make_float(_pr, "pooling_ratio")
                     for _pr in pooling_ratio]
  else:
    raise TypeError(
        "Expected list for 'pooling_ratio' argument to "
        "'fractional_avg_pool' Op, not %r." % pooling_ratio)
  # Optional attrs: substitute defaults for None, then canonicalize types.
  pseudo_random = _execute.make_bool(
      False if pseudo_random is None else pseudo_random, "pseudo_random")
  overlapping = _execute.make_bool(
      False if overlapping is None else overlapping, "overlapping")
  deterministic = _execute.make_bool(
      False if deterministic is None else deterministic, "deterministic")
  seed = _execute.make_int(0 if seed is None else seed, "seed")
  seed2 = _execute.make_int(0 if seed2 is None else seed2, "seed2")
  # Infer the dtype attribute T from the single tensor input.
  _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
  _inputs_flat = [value]
  _attrs = ("pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random,
            "overlapping", overlapping, "deterministic", deterministic,
            "seed", seed, "seed2", seed2, "T", _attr_T)
  # FractionalAvgPool produces three outputs.
  _result = _execute.execute(b"FractionalAvgPool", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "FractionalAvgPool", _inputs_flat, _attrs, _result, name)
  return _FractionalAvgPoolOutput._make(_result)
3033 
3034 
def fractional_avg_pool_grad(orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping=False, name=None):
  r"""Computes gradient of the FractionalAvgPool function.

  Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
  FractionalAvgPoolGrad, we just need to evenly back-propagate each element of
  out_backprop to those indices that form the same pooling cell. Therefore, we
  just need to know the shape of original input tensor, instead of the whole
  tensor.

  Args:
    orig_input_tensor_shape: A `Tensor` of type `int64`.
      Original input tensor shape for `fractional_avg_pool`
    out_backprop: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`.
      4-D with shape `[batch, height, width, channels]`.  Gradients
      w.r.t. the output of `fractional_avg_pool`.
    row_pooling_sequence: A `Tensor` of type `int64`.
      row pooling sequence, form pooling region with
      col_pooling_sequence.
    col_pooling_sequence: A `Tensor` of type `int64`.
      column pooling sequence, form pooling region with
      row_pooling sequence.
    overlapping: An optional `bool`. Defaults to `False`.
      When set to True, it means when pooling, the values at the boundary
      of adjacent pooling cells are used by both cells. For example:

      `index  0  1  2  3  4`

      `value  20 5  16 3  7`

      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
      The result would be [41/3, 26/3] for fractional avg pooling.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `out_backprop`.
  """
  _ctx = _context._context
  # Graph mode (or no initialized eager context): build a graph node via the
  # op-def library and record the gradient for backprop.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Optional attr: substitute the default for None, then canonicalize.
    if overlapping is None:
      overlapping = False
    overlapping = _execute.make_bool(overlapping, "overlapping")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FractionalAvgPoolGrad",
        orig_input_tensor_shape=orig_input_tensor_shape,
        out_backprop=out_backprop, row_pooling_sequence=row_pooling_sequence,
        col_pooling_sequence=col_pooling_sequence, overlapping=overlapping,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Mirror the op's attrs from the created node so the gradient tape can
    # replay it.
    _attrs = ("overlapping", _op.get_attr("overlapping"), "T",
              _op.get_attr("T"))
    _execute.record_gradient(
      "FractionalAvgPoolGrad", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the result list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FractionalAvgPoolGrad", name, _ctx._post_execution_callbacks,
        orig_input_tensor_shape, out_backprop, row_pooling_sequence,
        col_pooling_sequence, "overlapping", overlapping)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return fractional_avg_pool_grad_eager_fallback(
          orig_input_tensor_shape, out_backprop, row_pooling_sequence,
          col_pooling_sequence, overlapping=overlapping, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the raised error for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3109 
3110 
def fractional_avg_pool_grad_eager_fallback(orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fractional_avg_pool_grad
  """
  _ctx = ctx if ctx else _context.context()
  # Optional attr: substitute the default for None, then canonicalize.
  overlapping = _execute.make_bool(
      False if overlapping is None else overlapping, "overlapping")
  # Only out_backprop carries the polymorphic dtype attribute T.
  _attr_T, (out_backprop,) = _execute.args_to_matching_eager(
      [out_backprop], _ctx)
  # The shape and pooling-sequence inputs are always int64 tensors.
  orig_input_tensor_shape = _ops.convert_to_tensor(
      orig_input_tensor_shape, _dtypes.int64)
  row_pooling_sequence = _ops.convert_to_tensor(
      row_pooling_sequence, _dtypes.int64)
  col_pooling_sequence = _ops.convert_to_tensor(
      col_pooling_sequence, _dtypes.int64)
  _inputs_flat = [orig_input_tensor_shape, out_backprop,
                  row_pooling_sequence, col_pooling_sequence]
  _attrs = ("overlapping", overlapping, "T", _attr_T)
  _result = _execute.execute(b"FractionalAvgPoolGrad", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "FractionalAvgPoolGrad", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the result list.
  _result, = _result
  return _result
3131 
3132 
# Named output holder for the three-output FractionalMaxPool op, so callers
# can access results by field name instead of tuple position.
_fractional_max_pool_outputs = ["output", "row_pooling_sequence",
                               "col_pooling_sequence"]
_FractionalMaxPoolOutput = _collections.namedtuple(
    "FractionalMaxPool", _fractional_max_pool_outputs)
3137 
3138 
@tf_export('nn.fractional_max_pool')
def fractional_max_pool(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None):
  r"""Performs fractional max pooling on the input.

  Fractional max pooling is slightly different than regular max pooling.  In
  regular max pooling, you downsize an input set by taking the maximum value of
  smaller N x N subsections of the set (often 2x2), and try to reduce the set by
  a factor of N, where N is an integer.  Fractional max pooling, as you might
  expect from the word "fractional", means that the overall reduction ratio N
  does not have to be an integer.

  The sizes of the pooling regions are generated randomly but are fairly uniform.
  For example, let's look at the height dimension, and the constraints on the
  list of rows that will be pool boundaries.

  First we define the following:

  1.  input_row_length : the number of rows from the input set
  2.  output_row_length : which will be smaller than the input
  3.  alpha = input_row_length / output_row_length : our reduction ratio
  4.  K = floor(alpha)
  5.  row_pooling_sequence : this is the result list of pool boundary rows

  Then, row_pooling_sequence should satisfy:

  1.  a[0] = 0 : the first value of the sequence is 0
  2.  a[end] = input_row_length : the last value of the sequence is the size
  3.  K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
  4.  length(row_pooling_sequence) = output_row_length+1

  For more details on fractional max pooling, see this paper:
  [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)

  Args:
    value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`.
      4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length `>= 4`.
      Pooling ratio for each dimension of `value`, currently only
      supports row and col dimension and should be >= 1.0. For example, a valid
      pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
      must be 1.0 because we don't allow pooling on batch and channels
      dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
      respectively.
    pseudo_random: An optional `bool`. Defaults to `False`.
      When set to True, generates the pooling sequence in a
      pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
      Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
      difference between pseudorandom and random.
    overlapping: An optional `bool`. Defaults to `False`.
      When set to True, it means when pooling, the values at the boundary
      of adjacent pooling cells are used by both cells. For example:

      `index  0  1  2  3  4`

      `value  20 5  16 3  7`

      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
      The result would be [20, 16] for fractional max pooling.
    deterministic: An optional `bool`. Defaults to `False`.
      When set to True, a fixed pooling region will be used when
      iterating over a FractionalMaxPool node in the computation graph. Mainly used
      in unit test to make FractionalMaxPool deterministic.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      An second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, row_pooling_sequence, col_pooling_sequence).

    output: A `Tensor`. Has the same type as `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.
  """
  # Dispatch: graph mode builds an op node; eager mode executes immediately.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph-mode path: validate/coerce every attr in Python, then create
    # the op node via the op-def library.
    if not isinstance(pooling_ratio, (list, tuple)):
      raise TypeError(
          "Expected list for 'pooling_ratio' argument to "
          "'fractional_max_pool' Op, not %r." % pooling_ratio)
    pooling_ratio = [_execute.make_float(_f, "pooling_ratio") for _f in pooling_ratio]
    if pseudo_random is None:
      pseudo_random = False
    pseudo_random = _execute.make_bool(pseudo_random, "pseudo_random")
    if overlapping is None:
      overlapping = False
    overlapping = _execute.make_bool(overlapping, "overlapping")
    if deterministic is None:
      deterministic = False
    deterministic = _execute.make_bool(deterministic, "deterministic")
    if seed is None:
      seed = 0
    seed = _execute.make_int(seed, "seed")
    if seed2 is None:
      seed2 = 0
    seed2 = _execute.make_int(seed2, "seed2")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FractionalMaxPool", value=value, pooling_ratio=pooling_ratio,
        pseudo_random=pseudo_random, overlapping=overlapping,
        deterministic=deterministic, seed=seed, seed2=seed2, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Flat (name, value) attr pairs read back from the created op; the
    # ordering matches the eager-fallback branch — do not reorder.
    _attrs = ("pooling_ratio", _op.get_attr("pooling_ratio"), "pseudo_random",
              _op.get_attr("pseudo_random"), "overlapping",
              _op.get_attr("overlapping"), "deterministic",
              _op.get_attr("deterministic"), "seed", _op.get_attr("seed"),
              "seed2", _op.get_attr("seed2"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "FractionalMaxPool", _inputs_flat, _attrs, _result, name)
    # Three outputs: (output, row_pooling_sequence, col_pooling_sequence).
    _result = _FractionalMaxPoolOutput._make(_result)
    return _result

  else:
    try:
      # Eager fast path: a single C call executing the op directly.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FractionalMaxPool", name, _ctx._post_execution_callbacks, value,
        "pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random,
        "overlapping", overlapping, "deterministic", deterministic, "seed",
        seed, "seed2", seed2)
      _result = _FractionalMaxPoolOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; retry via the Python
      # slow path, which coerces the arguments itself.
      return fractional_max_pool_eager_fallback(
          value, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random,
          overlapping=overlapping, deterministic=deterministic, seed=seed,
          seed2=seed2, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface a failed C++ status as the matching Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3275 
3276 
def fractional_max_pool_eager_fallback(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None, ctx=None):
  r"""Slow-path eager executor for the FractionalMaxPool op.

  Coerces every attr to its canonical type in Python, matches `value` to
  its dtype T, dispatches through _execute.execute, records the gradient,
  and wraps the three outputs in a namedtuple.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(pooling_ratio, (list, tuple)):
    raise TypeError(
        "Expected list for 'pooling_ratio' argument to "
        "'fractional_max_pool' Op, not %r." % pooling_ratio)
  pooling_ratio = [_execute.make_float(ratio, "pooling_ratio")
                   for ratio in pooling_ratio]
  # Substitute op defaults for missing attrs, then coerce each type.
  pseudo_random = _execute.make_bool(
      False if pseudo_random is None else pseudo_random, "pseudo_random")
  overlapping = _execute.make_bool(
      False if overlapping is None else overlapping, "overlapping")
  deterministic = _execute.make_bool(
      False if deterministic is None else deterministic, "deterministic")
  seed = _execute.make_int(0 if seed is None else seed, "seed")
  seed2 = _execute.make_int(0 if seed2 is None else seed2, "seed2")
  _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
  _inputs_flat = [value]
  # Flat (name, value) attr pairs; order mirrors the graph-mode branch.
  _attrs = ("pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random,
            "overlapping", overlapping, "deterministic", deterministic,
            "seed", seed, "seed2", seed2, "T", _attr_T)
  _result = _execute.execute(b"FractionalMaxPool", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "FractionalMaxPool", _inputs_flat, _attrs, _result, name)
  return _FractionalMaxPoolOutput._make(_result)
3313 
3314 
def fractional_max_pool_grad(orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping=False, name=None):
  r"""Computes gradient of the FractionalMaxPool function.

  Args:
    orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`.
      Original input for `fractional_max_pool`
    orig_output: A `Tensor`. Must have the same type as `orig_input`.
      Original output for `fractional_max_pool`
    out_backprop: A `Tensor`. Must have the same type as `orig_input`.
      4-D with shape `[batch, height, width, channels]`.  Gradients
      w.r.t. the output of `fractional_max_pool`.
    row_pooling_sequence: A `Tensor` of type `int64`.
      row pooling sequence, form pooling region with
      col_pooling_sequence.
    col_pooling_sequence: A `Tensor` of type `int64`.
      column pooling sequence, form pooling region with
      row_pooling sequence.
    overlapping: An optional `bool`. Defaults to `False`.
      When set to True, it means when pooling, the values at the boundary
      of adjacent pooling cells are used by both cells. For example:

      `index  0  1  2  3  4`

      `value  20 5  16 3  7`

      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
      The result would be [20, 16] for fractional max pooling.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `orig_input`.
  """
  # Dispatch: graph mode builds an op node; eager mode executes immediately.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph-mode path: canonicalize the attr, then create the op node.
    if overlapping is None:
      overlapping = False
    overlapping = _execute.make_bool(overlapping, "overlapping")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FractionalMaxPoolGrad", orig_input=orig_input,
        orig_output=orig_output, out_backprop=out_backprop,
        row_pooling_sequence=row_pooling_sequence,
        col_pooling_sequence=col_pooling_sequence, overlapping=overlapping,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Flat (name, value) attr pairs read back from the created op.
    _attrs = ("overlapping", _op.get_attr("overlapping"), "T",
              _op.get_attr("T"))
    _execute.record_gradient(
      "FractionalMaxPoolGrad", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element output list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: a single C call executing the op directly.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FractionalMaxPoolGrad", name, _ctx._post_execution_callbacks,
        orig_input, orig_output, out_backprop, row_pooling_sequence,
        col_pooling_sequence, "overlapping", overlapping)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; retry via the Python
      # slow path, which coerces the arguments itself.
      return fractional_max_pool_grad_eager_fallback(
          orig_input, orig_output, out_backprop, row_pooling_sequence,
          col_pooling_sequence, overlapping=overlapping, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface a failed C++ status as the matching Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3385 
3386 
def fractional_max_pool_grad_eager_fallback(orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping=False, name=None, ctx=None):
  r"""Slow-path eager executor for the FractionalMaxPoolGrad op.

  Matches the three value tensors to a common dtype T, converts the two
  pooling sequences to int64, dispatches through _execute.execute, and
  returns the single gradient tensor.
  """
  _ctx = ctx if ctx else _context.context()
  overlapping = _execute.make_bool(
      False if overlapping is None else overlapping, "overlapping")
  # The value tensors share the op's dtype attr T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [orig_input, orig_output, out_backprop], _ctx)
  orig_input, orig_output, out_backprop = _inputs_T
  # The pooling-boundary sequences are always int64.
  row_pooling_sequence = _ops.convert_to_tensor(row_pooling_sequence, _dtypes.int64)
  col_pooling_sequence = _ops.convert_to_tensor(col_pooling_sequence, _dtypes.int64)
  _inputs_flat = [orig_input, orig_output, out_backprop,
                  row_pooling_sequence, col_pooling_sequence]
  _attrs = ("overlapping", overlapping, "T", _attr_T)
  _result = _execute.execute(b"FractionalMaxPoolGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "FractionalMaxPoolGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
3407 
3408 
# Field names for the FusedBatchNorm op's five outputs.
__fused_batch_norm_outputs = [
    "y",
    "batch_mean",
    "batch_variance",
    "reserve_space_1",
    "reserve_space_2",
]
_FusedBatchNormOutput = _collections.namedtuple(
    "FusedBatchNorm", __fused_batch_norm_outputs)
3413 
3414 
def _fused_batch_norm(x, scale, offset, mean, variance, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  r"""Batch normalization.

  Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
  The size of 1D Tensors matches the dimension C of the 4D Tensors.

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`.
      A 4D Tensor for input data.
    scale: A `Tensor`. Must have the same type as `x`.
      A 1D Tensor for scaling factor, to scale the normalized x.
    offset: A `Tensor`. Must have the same type as `x`.
      A 1D Tensor for offset, to shift to the normalized x.
    mean: A `Tensor`. Must have the same type as `x`.
      A 1D Tensor for population mean. Used for inference only;
      must be empty for training.
    variance: A `Tensor`. Must have the same type as `x`.
      A 1D Tensor for population variance. Used for inference only;
      must be empty for training.
    epsilon: An optional `float`. Defaults to `0.0001`.
      A small float number added to the variance of x.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      The data format for x and y. Either "NHWC" (default) or "NCHW".
    is_training: An optional `bool`. Defaults to `True`.
      A bool value to indicate the operation is for training (default)
      or inference.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2).

    y: A `Tensor`. Has the same type as `x`.
    batch_mean: A `Tensor`. Has the same type as `x`.
    batch_variance: A `Tensor`. Has the same type as `x`.
    reserve_space_1: A `Tensor`. Has the same type as `x`.
    reserve_space_2: A `Tensor`. Has the same type as `x`.
  """
  # Dispatch: graph mode builds an op node; eager mode executes immediately.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph-mode path: canonicalize attrs, then create the op node.
    if epsilon is None:
      epsilon = 0.0001
    epsilon = _execute.make_float(epsilon, "epsilon")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if is_training is None:
      is_training = True
    is_training = _execute.make_bool(is_training, "is_training")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FusedBatchNorm", x=x, scale=scale, offset=offset, mean=mean,
        variance=variance, epsilon=epsilon, data_format=data_format,
        is_training=is_training, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Flat (name, value) attr pairs read back from the created op.
    _attrs = ("T", _op.get_attr("T"), "epsilon", _op.get_attr("epsilon"),
              "data_format", _op.get_attr("data_format"), "is_training",
              _op.get_attr("is_training"))
    _execute.record_gradient(
      "FusedBatchNorm", _inputs_flat, _attrs, _result, name)
    # Five outputs, wrapped as a namedtuple.
    _result = _FusedBatchNormOutput._make(_result)
    return _result

  else:
    try:
      # Eager fast path: a single C call executing the op directly.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FusedBatchNorm", name, _ctx._post_execution_callbacks, x, scale,
        offset, mean, variance, "epsilon", epsilon, "data_format",
        data_format, "is_training", is_training)
      _result = _FusedBatchNormOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; retry via the Python
      # slow path, which coerces the arguments itself.
      return _fused_batch_norm_eager_fallback(
          x, scale, offset, mean, variance, epsilon=epsilon,
          data_format=data_format, is_training=is_training, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface a failed C++ status as the matching Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3497 
3498 
def _fused_batch_norm_eager_fallback(x, scale, offset, mean, variance, epsilon=0.0001, data_format="NHWC", is_training=True, name=None, ctx=None):
  r"""Slow-path eager executor for the FusedBatchNorm op.

  Canonicalizes the attrs, matches the five tensor inputs to a common
  dtype T, dispatches through _execute.execute, records the gradient,
  and wraps the five outputs in a namedtuple.
  """
  _ctx = ctx if ctx else _context.context()
  # Substitute op defaults for missing attrs, then coerce each type.
  epsilon = _execute.make_float(0.0001 if epsilon is None else epsilon,
                                "epsilon")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  is_training = _execute.make_bool(
      True if is_training is None else is_training, "is_training")
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [x, scale, offset, mean, variance], _ctx)
  x, scale, offset, mean, variance = _inputs_T
  _inputs_flat = [x, scale, offset, mean, variance]
  # Flat (name, value) attr pairs; order mirrors the graph-mode branch.
  _attrs = ("T", _attr_T, "epsilon", epsilon, "data_format", data_format,
            "is_training", is_training)
  _result = _execute.execute(b"FusedBatchNorm", 5, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "FusedBatchNorm", _inputs_flat, _attrs, _result, name)
  return _FusedBatchNormOutput._make(_result)
3524 
3525 
# Field names for the FusedBatchNormGrad op's five outputs.
_fused_batch_norm_grad_outputs = [
    "x_backprop",
    "scale_backprop",
    "offset_backprop",
    "reserve_space_3",
    "reserve_space_4",
]
_FusedBatchNormGradOutput = _collections.namedtuple(
    "FusedBatchNormGrad", _fused_batch_norm_grad_outputs)
3531 
3532 
def fused_batch_norm_grad(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  r"""Gradient for batch normalization.

  Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
  The size of 1D Tensors matches the dimension C of the 4D Tensors.

  Args:
    y_backprop: A `Tensor`. Must be one of the following types: `float32`.
      A 4D Tensor for the gradient with respect to y.
    x: A `Tensor`. Must have the same type as `y_backprop`.
      A 4D Tensor for input data.
    scale: A `Tensor`. Must have the same type as `y_backprop`.
      A 1D Tensor for scaling factor, to scale the normalized x.
    reserve_space_1: A `Tensor`. Must have the same type as `y_backprop`.
      When is_training is True, a 1D Tensor for the computed batch
      mean to be reused in gradient computation. When is_training is
      False, a 1D Tensor for the population mean to be reused in both
      1st and 2nd order gradient computation.
    reserve_space_2: A `Tensor`. Must have the same type as `y_backprop`.
      When is_training is True, a 1D Tensor for the computed batch
      variance (inverted variance in the cuDNN case) to be reused in
      gradient computation. When is_training is False, a 1D Tensor
      for the population variance to be reused in both 1st and 2nd
      order gradient computation.
    epsilon: An optional `float`. Defaults to `0.0001`.
      A small float number added to the variance of x.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      The data format for y_backprop, x, x_backprop.
      Either "NHWC" (default) or "NCHW".
    is_training: An optional `bool`. Defaults to `True`.
      A bool value to indicate the operation is for training (default)
      or inference.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4).

    x_backprop: A `Tensor`. Has the same type as `y_backprop`.
    scale_backprop: A `Tensor`. Has the same type as `y_backprop`.
    offset_backprop: A `Tensor`. Has the same type as `y_backprop`.
    reserve_space_3: A `Tensor`. Has the same type as `y_backprop`.
    reserve_space_4: A `Tensor`. Has the same type as `y_backprop`.
  """
  # Dispatch: graph mode builds an op node; eager mode executes immediately.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph-mode path: canonicalize attrs, then create the op node.
    if epsilon is None:
      epsilon = 0.0001
    epsilon = _execute.make_float(epsilon, "epsilon")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if is_training is None:
      is_training = True
    is_training = _execute.make_bool(is_training, "is_training")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FusedBatchNormGrad", y_backprop=y_backprop, x=x, scale=scale,
        reserve_space_1=reserve_space_1, reserve_space_2=reserve_space_2,
        epsilon=epsilon, data_format=data_format, is_training=is_training,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Flat (name, value) attr pairs read back from the created op.
    _attrs = ("T", _op.get_attr("T"), "epsilon", _op.get_attr("epsilon"),
              "data_format", _op.get_attr("data_format"), "is_training",
              _op.get_attr("is_training"))
    _execute.record_gradient(
      "FusedBatchNormGrad", _inputs_flat, _attrs, _result, name)
    # Five outputs, wrapped as a namedtuple.
    _result = _FusedBatchNormGradOutput._make(_result)
    return _result

  else:
    try:
      # Eager fast path: a single C call executing the op directly.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FusedBatchNormGrad", name, _ctx._post_execution_callbacks,
        y_backprop, x, scale, reserve_space_1, reserve_space_2, "epsilon",
        epsilon, "data_format", data_format, "is_training", is_training)
      _result = _FusedBatchNormGradOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; retry via the Python
      # slow path, which coerces the arguments itself.
      return fused_batch_norm_grad_eager_fallback(
          y_backprop, x, scale, reserve_space_1, reserve_space_2,
          epsilon=epsilon, data_format=data_format, is_training=is_training,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface a failed C++ status as the matching Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3622 
3623 
def fused_batch_norm_grad_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon=0.0001, data_format="NHWC", is_training=True, name=None, ctx=None):
  r"""Slow-path eager executor for the FusedBatchNormGrad op.

  Canonicalizes the attrs, matches all five tensor inputs to a common
  dtype T, dispatches through _execute.execute, records the gradient,
  and wraps the five gradient outputs in a namedtuple.
  """
  _ctx = ctx if ctx else _context.context()
  # Substitute op defaults for missing attrs, then coerce each type.
  epsilon = _execute.make_float(0.0001 if epsilon is None else epsilon,
                                "epsilon")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  is_training = _execute.make_bool(
      True if is_training is None else is_training, "is_training")
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [y_backprop, x, scale, reserve_space_1, reserve_space_2], _ctx)
  y_backprop, x, scale, reserve_space_1, reserve_space_2 = _inputs_T
  _inputs_flat = [y_backprop, x, scale, reserve_space_1, reserve_space_2]
  # Flat (name, value) attr pairs; order mirrors the graph-mode branch.
  _attrs = ("T", _attr_T, "epsilon", epsilon, "data_format", data_format,
            "is_training", is_training)
  _result = _execute.execute(b"FusedBatchNormGrad", 5, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "FusedBatchNormGrad", _inputs_flat, _attrs, _result, name)
  return _FusedBatchNormGradOutput._make(_result)
3649 
3650 
# Field names for the FusedBatchNormGradV2 op's five outputs.
_fused_batch_norm_grad_v2_outputs = [
    "x_backprop",
    "scale_backprop",
    "offset_backprop",
    "reserve_space_3",
    "reserve_space_4",
]
_FusedBatchNormGradV2Output = _collections.namedtuple(
    "FusedBatchNormGradV2", _fused_batch_norm_grad_v2_outputs)
3656 
3657 
def fused_batch_norm_grad_v2(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  r"""Gradient for batch normalization.

  Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
  The size of 1D Tensors matches the dimension C of the 4D Tensors.

  Args:
    y_backprop: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      A 4D Tensor for the gradient with respect to y.
    x: A `Tensor`. Must have the same type as `y_backprop`.
      A 4D Tensor for input data.
    scale: A `Tensor` of type `float32`.
      A 1D Tensor for scaling factor, to scale the normalized x.
    reserve_space_1: A `Tensor`. Must be one of the following types: `float32`.
      When is_training is True, a 1D Tensor for the computed batch
      mean to be reused in gradient computation. When is_training is
      False, a 1D Tensor for the population mean to be reused in both
      1st and 2nd order gradient computation.
    reserve_space_2: A `Tensor`. Must have the same type as `reserve_space_1`.
      When is_training is True, a 1D Tensor for the computed batch
      variance (inverted variance in the cuDNN case) to be reused in
      gradient computation. When is_training is False, a 1D Tensor
      for the population variance to be reused in both 1st and 2nd
      order gradient computation.
    epsilon: An optional `float`. Defaults to `0.0001`.
      A small float number added to the variance of x.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      The data format for y_backprop, x, x_backprop.
      Either "NHWC" (default) or "NCHW".
    is_training: An optional `bool`. Defaults to `True`.
      A bool value to indicate the operation is for training (default)
      or inference.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4).

    x_backprop: A `Tensor`. Has the same type as `y_backprop`.
    scale_backprop: A `Tensor`. Has the same type as `reserve_space_1`.
    offset_backprop: A `Tensor`. Has the same type as `reserve_space_1`.
    reserve_space_3: A `Tensor`. Has the same type as `reserve_space_1`.
    reserve_space_4: A `Tensor`. Has the same type as `reserve_space_1`.
  """
  # Dispatch: graph mode builds an op node; eager mode executes immediately.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph-mode path: canonicalize attrs, then create the op node.
    if epsilon is None:
      epsilon = 0.0001
    epsilon = _execute.make_float(epsilon, "epsilon")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if is_training is None:
      is_training = True
    is_training = _execute.make_bool(is_training, "is_training")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FusedBatchNormGradV2", y_backprop=y_backprop, x=x, scale=scale,
        reserve_space_1=reserve_space_1, reserve_space_2=reserve_space_2,
        epsilon=epsilon, data_format=data_format, is_training=is_training,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Flat (name, value) attr pairs read back from the created op.
    # V2 carries two dtype attrs: T (y_backprop/x) and U (reserve tensors).
    _attrs = ("T", _op.get_attr("T"), "U", _op.get_attr("U"), "epsilon",
              _op.get_attr("epsilon"), "data_format",
              _op.get_attr("data_format"), "is_training",
              _op.get_attr("is_training"))
    _execute.record_gradient(
      "FusedBatchNormGradV2", _inputs_flat, _attrs, _result, name)
    # Five outputs, wrapped as a namedtuple.
    _result = _FusedBatchNormGradV2Output._make(_result)
    return _result

  else:
    try:
      # Eager fast path: a single C call executing the op directly.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FusedBatchNormGradV2", name, _ctx._post_execution_callbacks,
        y_backprop, x, scale, reserve_space_1, reserve_space_2, "epsilon",
        epsilon, "data_format", data_format, "is_training", is_training)
      _result = _FusedBatchNormGradV2Output._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; retry via the Python
      # slow path, which coerces the arguments itself.
      return fused_batch_norm_grad_v2_eager_fallback(
          y_backprop, x, scale, reserve_space_1, reserve_space_2,
          epsilon=epsilon, data_format=data_format, is_training=is_training,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface a failed C++ status as the matching Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3748 
3749 
def fused_batch_norm_grad_v2_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon=0.0001, data_format="NHWC", is_training=True, name=None, ctx=None):
  r"""Slow-path eager executor for the FusedBatchNormGradV2 op.

  V2 carries two dtype attrs: T for y_backprop/x and U for the reserve
  tensors; `scale` is always converted to float32.
  """
  _ctx = ctx if ctx else _context.context()
  # Substitute op defaults for missing attrs, then coerce each type.
  epsilon = _execute.make_float(0.0001 if epsilon is None else epsilon,
                                "epsilon")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  is_training = _execute.make_bool(
      True if is_training is None else is_training, "is_training")
  _attr_T, (y_backprop, x) = _execute.args_to_matching_eager(
      [y_backprop, x], _ctx)
  _attr_U, (reserve_space_1, reserve_space_2) = (
      _execute.args_to_matching_eager([reserve_space_1, reserve_space_2],
                                      _ctx))
  scale = _ops.convert_to_tensor(scale, _dtypes.float32)
  _inputs_flat = [y_backprop, x, scale, reserve_space_1, reserve_space_2]
  # Flat (name, value) attr pairs; order mirrors the graph-mode branch.
  _attrs = ("T", _attr_T, "U", _attr_U, "epsilon", epsilon, "data_format",
            data_format, "is_training", is_training)
  _result = _execute.execute(b"FusedBatchNormGradV2", 5, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "FusedBatchNormGradV2", _inputs_flat, _attrs, _result, name)
  return _FusedBatchNormGradV2Output._make(_result)
3778 
3779 
# Field names for the FusedBatchNormV2 op's five outputs.
_fused_batch_norm_v2_outputs = [
    "y",
    "batch_mean",
    "batch_variance",
    "reserve_space_1",
    "reserve_space_2",
]
_FusedBatchNormV2Output = _collections.namedtuple(
    "FusedBatchNormV2", _fused_batch_norm_v2_outputs)
3784 
3785 
def fused_batch_norm_v2(x, scale, offset, mean, variance, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  r"""Batch normalization.

  Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
  The size of 1D Tensors matches the dimension C of the 4D Tensors.

  Args:
    x: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      A 4D Tensor for input data.
    scale: A `Tensor`. Must be one of the following types: `float32`.
      A 1D Tensor for scaling factor, to scale the normalized x.
    offset: A `Tensor`. Must have the same type as `scale`.
      A 1D Tensor for offset, to shift to the normalized x.
    mean: A `Tensor`. Must have the same type as `scale`.
      A 1D Tensor for population mean. Used for inference only;
      must be empty for training.
    variance: A `Tensor`. Must have the same type as `scale`.
      A 1D Tensor for population variance. Used for inference only;
      must be empty for training.
    epsilon: An optional `float`. Defaults to `0.0001`.
      A small float number added to the variance of x.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      The data format for x and y. Either "NHWC" (default) or "NCHW".
    is_training: An optional `bool`. Defaults to `True`.
      A bool value to indicate the operation is for training (default)
      or inference.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2).

    y: A `Tensor`. Has the same type as `x`.
    batch_mean: A `Tensor`. Has the same type as `scale`.
    batch_variance: A `Tensor`. Has the same type as `scale`.
    reserve_space_1: A `Tensor`. Has the same type as `scale`.
    reserve_space_2: A `Tensor`. Has the same type as `scale`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode (or no eager context yet): normalize the optional attrs,
    # then add a FusedBatchNormV2 node to the default graph.
    if epsilon is None:
      epsilon = 0.0001
    epsilon = _execute.make_float(epsilon, "epsilon")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    if is_training is None:
      is_training = True
    is_training = _execute.make_bool(is_training, "is_training")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FusedBatchNormV2", x=x, scale=scale, offset=offset, mean=mean,
        variance=variance, epsilon=epsilon, data_format=data_format,
        is_training=is_training, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Read the resolved attr values back off the created op so the recorded
    # gradient sees exactly what the op was built with.
    _attrs = ("T", _op.get_attr("T"), "U", _op.get_attr("U"), "epsilon",
              _op.get_attr("epsilon"), "data_format",
              _op.get_attr("data_format"), "is_training",
              _op.get_attr("is_training"))
    _execute.record_gradient(
      "FusedBatchNormV2", _inputs_flat, _attrs, _result, name)
    _result = _FusedBatchNormV2Output._make(_result)
    return _result

  else:
    try:
      # Eager fast path: dispatch straight into the C runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FusedBatchNormV2", name, _ctx._post_execution_callbacks, x, scale,
        offset, mean, variance, "epsilon", epsilon, "data_format",
        data_format, "is_training", is_training)
      _result = _FusedBatchNormV2Output._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the generic slow path.
      return fused_batch_norm_v2_eager_fallback(
          x, scale, offset, mean, variance, epsilon=epsilon,
          data_format=data_format, is_training=is_training, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C-level status as the matching Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3869 
3870 
def fused_batch_norm_v2_eager_fallback(x, scale, offset, mean, variance, epsilon=0.0001, data_format="NHWC", is_training=True, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fused_batch_norm_v2
  """
  _ctx = ctx if ctx else _context.context()
  # Coerce the optional attrs, substituting the op defaults for None.
  epsilon = _execute.make_float(0.0001 if epsilon is None else epsilon,
                                "epsilon")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  is_training = _execute.make_bool(
      True if is_training is None else is_training, "is_training")
  # x carries attr type T; scale/offset/mean/variance share attr type U.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _attr_U, (scale, offset, mean, variance) = _execute.args_to_matching_eager(
      [scale, offset, mean, variance], _ctx)
  flat_inputs = [x, scale, offset, mean, variance]
  attr_tuple = ("T", _attr_T, "U", _attr_U, "epsilon", epsilon,
                "data_format", data_format, "is_training", is_training)
  outputs = _execute.execute(b"FusedBatchNormV2", 5, inputs=flat_inputs,
                             attrs=attr_tuple, ctx=_ctx, name=name)
  _execute.record_gradient(
      "FusedBatchNormV2", flat_inputs, attr_tuple, outputs, name)
  return _FusedBatchNormV2Output._make(outputs)
3897 
3898 
def fused_pad_conv2d(input, paddings, filter, mode, strides, padding, name=None):
  r"""Performs a padding as a preprocess during a convolution.

  Similar to FusedResizeAndPadConv2d, this op allows for an optimized
  implementation where the spatial padding transformation stage is fused with the
  im2col lookup, but in this case without the bilinear filtering required for
  resizing. Fusing the padding prevents the need to write out the intermediate
  results as whole tensors, reducing memory pressure, and we can get some latency
  gains by merging the transformation calculations.
  The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
  order is used instead.
  Internally this op uses a single per-graph scratch buffer, which means that it
  will block if multiple versions are being run in parallel. This is because this
  operator is primarily an optimization to minimize memory usage.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
      4-D with shape `[batch, in_height, in_width, in_channels]`.
    paddings: A `Tensor` of type `int32`.
      A two-column matrix specifying the padding sizes. The number of
      rows must be the same as the rank of `input`.
    filter: A `Tensor`. Must have the same type as `input`. 4-D with shape
      `[filter_height, filter_width, in_channels, out_channels]`.
    mode: A `string` from: `"REFLECT", "SYMMETRIC"`.
    strides: A list of `ints`.
      1-D of length 4.  The stride of the sliding window for each dimension
      of `input`. Must be in the same order as the dimension specified with format.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate attrs, then add a FusedPadConv2D node.
    mode = _execute.make_str(mode, "mode")
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'fused_pad_conv2d' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FusedPadConv2D", input=input, paddings=paddings, filter=filter,
        mode=mode, strides=strides, padding=padding, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "mode", _op.get_attr("mode"), "strides",
              _op.get_attr("strides"), "padding", _op.get_attr("padding"))
    _execute.record_gradient(
      "FusedPadConv2D", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch straight into the C runtime.
      # NOTE(review): unlike the graph path there is no unpacking here —
      # presumably the fast path already returns the single tensor; confirm
      # against TFE_Py_FastPathExecute's contract.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FusedPadConv2D", name, _ctx._post_execution_callbacks, input,
        paddings, filter, "mode", mode, "strides", strides, "padding",
        padding)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the generic slow path.
      return fused_pad_conv2d_eager_fallback(
          input, paddings, filter, mode=mode, strides=strides,
          padding=padding, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C-level status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3972 
3973 
def fused_pad_conv2d_eager_fallback(input, paddings, filter, mode, strides, padding, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fused_pad_conv2d
  """
  _ctx = ctx if ctx else _context.context()
  mode = _execute.make_str(mode, "mode")
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'fused_pad_conv2d' Op, not %r." % strides)
  strides = [_execute.make_int(stride, "strides") for stride in strides]
  padding = _execute.make_str(padding, "padding")
  # input and filter must agree on attr type T; paddings is always int32.
  _attr_T, (input, filter) = _execute.args_to_matching_eager(
      [input, filter], _ctx)
  paddings = _ops.convert_to_tensor(paddings, _dtypes.int32)
  flat_inputs = [input, paddings, filter]
  attr_tuple = ("T", _attr_T, "mode", mode, "strides", strides,
                "padding", padding)
  outputs = _execute.execute(b"FusedPadConv2D", 1, inputs=flat_inputs,
                             attrs=attr_tuple, ctx=_ctx, name=name)
  _execute.record_gradient(
      "FusedPadConv2D", flat_inputs, attr_tuple, outputs, name)
  result, = outputs
  return result
3998 
3999 
def fused_resize_and_pad_conv2d(input, size, paddings, filter, mode, strides, padding, resize_align_corners=False, name=None):
  r"""Performs a resize and padding as a preprocess during a convolution.

  It's often possible to do spatial transformations more efficiently as part of
  the packing stage of a convolution, so this op allows for an optimized
  implementation where these stages are fused together. This prevents the need to
  write out the intermediate results as whole tensors, reducing memory pressure,
  and we can get some latency gains by merging the transformation calculations.
  The data_format attribute for Conv2D isn't supported by this op, and defaults to
  'NHWC' order.
  Internally this op uses a single per-graph scratch buffer, which means that it
  will block if multiple versions are being run in parallel. This is because this
  operator is primarily an optimization to minimize memory usage.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
      4-D with shape `[batch, in_height, in_width, in_channels]`.
    size: A `Tensor` of type `int32`.
      A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
      new size for the images.
    paddings: A `Tensor` of type `int32`.
      A two-column matrix specifying the padding sizes. The number of
      rows must be the same as the rank of `input`.
    filter: A `Tensor`. Must have the same type as `input`. 4-D with shape
      `[filter_height, filter_width, in_channels, out_channels]`.
    mode: A `string` from: `"REFLECT", "SYMMETRIC"`.
    strides: A list of `ints`.
      1-D of length 4.  The stride of the sliding window for each dimension
      of `input`. Must be in the same order as the dimension specified with format.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    resize_align_corners: An optional `bool`. Defaults to `False`.
      If true, the centers of the 4 corner pixels of the input and output tensors are
      aligned, preserving the values at the corner pixels. Defaults to false.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate/normalize attrs, then add a
    # FusedResizeAndPadConv2D node to the default graph.
    mode = _execute.make_str(mode, "mode")
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'fused_resize_and_pad_conv2d' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if resize_align_corners is None:
      resize_align_corners = False
    resize_align_corners = _execute.make_bool(resize_align_corners, "resize_align_corners")
    _, _, _op = _op_def_lib._apply_op_helper(
        "FusedResizeAndPadConv2D", input=input, size=size, paddings=paddings,
        filter=filter, mode=mode, strides=strides, padding=padding,
        resize_align_corners=resize_align_corners, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "resize_align_corners",
              _op.get_attr("resize_align_corners"), "mode",
              _op.get_attr("mode"), "strides", _op.get_attr("strides"),
              "padding", _op.get_attr("padding"))
    _execute.record_gradient(
      "FusedResizeAndPadConv2D", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch straight into the C runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "FusedResizeAndPadConv2D", name, _ctx._post_execution_callbacks,
        input, size, paddings, filter, "resize_align_corners",
        resize_align_corners, "mode", mode, "strides", strides, "padding",
        padding)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the generic slow path.
      return fused_resize_and_pad_conv2d_eager_fallback(
          input, size, paddings, filter,
          resize_align_corners=resize_align_corners, mode=mode,
          strides=strides, padding=padding, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C-level status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4086 
4087 
def fused_resize_and_pad_conv2d_eager_fallback(input, size, paddings, filter, mode, strides, padding, resize_align_corners=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fused_resize_and_pad_conv2d
  """
  _ctx = ctx if ctx else _context.context()
  mode = _execute.make_str(mode, "mode")
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'fused_resize_and_pad_conv2d' Op, not %r." % strides)
  strides = [_execute.make_int(stride, "strides") for stride in strides]
  padding = _execute.make_str(padding, "padding")
  resize_align_corners = _execute.make_bool(
      False if resize_align_corners is None else resize_align_corners,
      "resize_align_corners")
  # input and filter must agree on attr type T; size/paddings are int32.
  _attr_T, (input, filter) = _execute.args_to_matching_eager(
      [input, filter], _ctx)
  size = _ops.convert_to_tensor(size, _dtypes.int32)
  paddings = _ops.convert_to_tensor(paddings, _dtypes.int32)
  flat_inputs = [input, size, paddings, filter]
  attr_tuple = ("T", _attr_T, "resize_align_corners", resize_align_corners,
                "mode", mode, "strides", strides, "padding", padding)
  outputs = _execute.execute(b"FusedResizeAndPadConv2D", 1,
                             inputs=flat_inputs, attrs=attr_tuple, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "FusedResizeAndPadConv2D", flat_inputs, attr_tuple, outputs, name)
  result, = outputs
  return result
4117 
4118 
def in_top_k(predictions, targets, k, name=None):
  r"""Says whether the targets are in the top `K` predictions.

  This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
  prediction for the target class is among the top `k` predictions among
  all predictions for example `i`. Note that the behavior of `InTopK` differs
  from the `TopK` op in its handling of ties; if multiple classes have the
  same prediction value and straddle the top-`k` boundary, all of those
  classes are considered to be in the top `k`.

  More formally, let

    \\(predictions_i\\) be the predictions for all classes for example `i`,
    \\(targets_i\\) be the target class for example `i`,
    \\(out_i\\) be the output for example `i`,

  $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$

  Args:
    predictions: A `Tensor` of type `float32`.
      A `batch_size` x `classes` tensor.
    targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A `batch_size` vector of class ids.
    k: An `int`. Number of top elements to look at for computing precision.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: `k` is an attr (compile-time int) on this op version.
    k = _execute.make_int(k, "k")
    _, _, _op = _op_def_lib._apply_op_helper(
        "InTopK", predictions=predictions, targets=targets, k=k, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("k", _op.get_attr("k"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "InTopK", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch straight into the C runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "InTopK", name,
        _ctx._post_execution_callbacks, predictions, targets, "k", k)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the generic slow path.
      return in_top_k_eager_fallback(
          predictions, targets, k=k, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C-level status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4176 
4177 
def in_top_k_eager_fallback(predictions, targets, k, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function in_top_k
  """
  _ctx = ctx if ctx else _context.context()
  k = _execute.make_int(k, "k")
  # targets carries attr type T, defaulting to int32 when ambiguous;
  # predictions is always float32.
  _attr_T, (targets,) = _execute.args_to_matching_eager(
      [targets], _ctx, _dtypes.int32)
  predictions = _ops.convert_to_tensor(predictions, _dtypes.float32)
  flat_inputs = [predictions, targets]
  attr_tuple = ("k", k, "T", _attr_T)
  outputs = _execute.execute(b"InTopK", 1, inputs=flat_inputs,
                             attrs=attr_tuple, ctx=_ctx, name=name)
  _execute.record_gradient(
      "InTopK", flat_inputs, attr_tuple, outputs, name)
  result, = outputs
  return result
4194 
4195 
def in_top_kv2(predictions, targets, k, name=None):
  r"""Says whether the targets are in the top `K` predictions.

  This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
  prediction for the target class is among the top `k` predictions among
  all predictions for example `i`. Note that the behavior of `InTopK` differs
  from the `TopK` op in its handling of ties; if multiple classes have the
  same prediction value and straddle the top-`k` boundary, all of those
  classes are considered to be in the top `k`.

  More formally, let

    \\(predictions_i\\) be the predictions for all classes for example `i`,
    \\(targets_i\\) be the target class for example `i`,
    \\(out_i\\) be the output for example `i`,

  $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$

  Args:
    predictions: A `Tensor` of type `float32`.
      A `batch_size` x `classes` tensor.
    targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A `batch_size` vector of class ids.
    k: A `Tensor`. Must have the same type as `targets`.
      Number of top elements to look at for computing precision.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode. Unlike InTopK, `k` here is a Tensor input rather than a
    # compile-time attr, so no attr validation is needed before op creation.
    _, _, _op = _op_def_lib._apply_op_helper(
        "InTopKV2", predictions=predictions, targets=targets, k=k, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "InTopKV2", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch straight into the C runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "InTopKV2",
        name, _ctx._post_execution_callbacks, predictions, targets, k)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the generic slow path.
      return in_top_kv2_eager_fallback(
          predictions, targets, k, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C-level status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4253 
4254 
def in_top_kv2_eager_fallback(predictions, targets, k, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function in_top_kv2
  """
  _ctx = ctx if ctx else _context.context()
  # targets and k share attr type T, defaulting to int32 when ambiguous;
  # predictions is always float32.
  _attr_T, (targets, k) = _execute.args_to_matching_eager(
      [targets, k], _ctx, _dtypes.int32)
  predictions = _ops.convert_to_tensor(predictions, _dtypes.float32)
  flat_inputs = [predictions, targets, k]
  attr_tuple = ("T", _attr_T)
  outputs = _execute.execute(b"InTopKV2", 1, inputs=flat_inputs,
                             attrs=attr_tuple, ctx=_ctx, name=name)
  _execute.record_gradient(
      "InTopKV2", flat_inputs, attr_tuple, outputs, name)
  result, = outputs
  return result
4271 
4272 
@tf_export('nn.l2_loss')
def l2_loss(t, name=None):
  r"""L2 Loss.

  Computes half the L2 norm of a tensor without the `sqrt`:

      output = sum(t ** 2) / 2

  Args:
    t: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      Typically 2-D, but may have any dimensions.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `t`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add an L2Loss node to the default graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "L2Loss", t=t, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "L2Loss", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch straight into the C runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "L2Loss", name,
        _ctx._post_execution_callbacks, t)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the generic slow path.
      return l2_loss_eager_fallback(
          t, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C-level status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4316 
4317 
def l2_loss_eager_fallback(t, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function l2_loss
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (t,) = _execute.args_to_matching_eager([t], _ctx)
  flat_inputs = [t]
  attr_tuple = ("T", _attr_T)
  outputs = _execute.execute(b"L2Loss", 1, inputs=flat_inputs,
                             attrs=attr_tuple, ctx=_ctx, name=name)
  _execute.record_gradient(
      "L2Loss", flat_inputs, attr_tuple, outputs, name)
  result, = outputs
  return result
4332 
4333 
@tf_export('nn.local_response_normalization', 'nn.lrn')
def lrn(input, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None):
  r"""Local Response Normalization.

  The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
  dimension), and each vector is normalized independently.  Within a given vector,
  each component is divided by the weighted, squared sum of inputs within
  `depth_radius`.  In detail,

      sqr_sum[a, b, c, d] =
          sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
      output = input / (bias + alpha * sqr_sum) ** beta

  For details, see [Krizhevsky et al., ImageNet classification with deep
  convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      4-D.
    depth_radius: An optional `int`. Defaults to `5`.
      0-D.  Half-width of the 1-D normalization window.
    bias: An optional `float`. Defaults to `1`.
      An offset (usually positive to avoid dividing by 0).
    alpha: An optional `float`. Defaults to `1`.
      A scale factor, usually positive.
    beta: An optional `float`. Defaults to `0.5`. An exponent.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the optional attrs, then add an LRN node.
    if depth_radius is None:
      depth_radius = 5
    depth_radius = _execute.make_int(depth_radius, "depth_radius")
    if bias is None:
      bias = 1
    bias = _execute.make_float(bias, "bias")
    if alpha is None:
      alpha = 1
    alpha = _execute.make_float(alpha, "alpha")
    if beta is None:
      beta = 0.5
    beta = _execute.make_float(beta, "beta")
    _, _, _op = _op_def_lib._apply_op_helper(
        "LRN", input=input, depth_radius=depth_radius, bias=bias, alpha=alpha,
        beta=beta, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("depth_radius", _op.get_attr("depth_radius"), "bias",
              _op.get_attr("bias"), "alpha", _op.get_attr("alpha"), "beta",
              _op.get_attr("beta"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "LRN", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch straight into the C runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "LRN", name,
        _ctx._post_execution_callbacks, input, "depth_radius", depth_radius,
        "bias", bias, "alpha", alpha, "beta", beta)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the generic slow path.
      return lrn_eager_fallback(
          input, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C-level status as the matching Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4409 
4410 
def lrn_eager_fallback(input, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function lrn
  """
  _ctx = ctx if ctx else _context.context()
  # Coerce the optional attrs, substituting the op defaults for None.
  depth_radius = _execute.make_int(
      5 if depth_radius is None else depth_radius, "depth_radius")
  bias = _execute.make_float(1 if bias is None else bias, "bias")
  alpha = _execute.make_float(1 if alpha is None else alpha, "alpha")
  beta = _execute.make_float(0.5 if beta is None else beta, "beta")
  # input carries attr type T, defaulting to float32 when ambiguous.
  _attr_T, (input,) = _execute.args_to_matching_eager(
      [input], _ctx, _dtypes.float32)
  flat_inputs = [input]
  attr_tuple = ("depth_radius", depth_radius, "bias", bias, "alpha", alpha,
                "beta", beta, "T", _attr_T)
  outputs = _execute.execute(b"LRN", 1, inputs=flat_inputs, attrs=attr_tuple,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "LRN", flat_inputs, attr_tuple, outputs, name)
  result, = outputs
  return result
4438 
4439 
def lrn_grad(input_grads, input_image, output_image, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None):
  r"""Gradients for Local Response Normalization.

  Args:
    input_grads: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      4-D with shape `[batch, height, width, channels]`.
    input_image: A `Tensor`. Must have the same type as `input_grads`.
      4-D with shape `[batch, height, width, channels]`.
    output_image: A `Tensor`. Must have the same type as `input_grads`.
      4-D with shape `[batch, height, width, channels]`.
    depth_radius: An optional `int`. Defaults to `5`. A depth radius.
    bias: An optional `float`. Defaults to `1`.
      An offset (usually > 0 to avoid dividing by 0).
    alpha: An optional `float`. Defaults to `1`.
      A scale factor, usually positive.
    beta: An optional `float`. Defaults to `0.5`. An exponent.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input_grads`.
  """
  _ctx = _context._context
  # Graph mode (no eager context active): normalize the optional attrs to
  # concrete int/float values, then build an LRNGrad node via the op-def
  # library and record its inputs/attrs for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if depth_radius is None:
      depth_radius = 5
    depth_radius = _execute.make_int(depth_radius, "depth_radius")
    if bias is None:
      bias = 1
    bias = _execute.make_float(bias, "bias")
    if alpha is None:
      alpha = 1
    alpha = _execute.make_float(alpha, "alpha")
    if beta is None:
      beta = 0.5
    beta = _execute.make_float(beta, "beta")
    _, _, _op = _op_def_lib._apply_op_helper(
        "LRNGrad", input_grads=input_grads, input_image=input_image,
        output_image=output_image, depth_radius=depth_radius, bias=bias,
        alpha=alpha, beta=beta, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are re-read from the created op so defaulting/validation done by
    # _apply_op_helper is reflected in what gets recorded.
    _attrs = ("depth_radius", _op.get_attr("depth_radius"), "bias",
              _op.get_attr("bias"), "alpha", _op.get_attr("alpha"), "beta",
              _op.get_attr("beta"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "LRNGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first. The flat argument list
    # (inputs first, then alternating attr name/value pairs) is a fixed
    # positional protocol expected by TFE_Py_FastPathExecute.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "LRNGrad",
        name, _ctx._post_execution_callbacks, input_grads, input_image,
        output_image, "depth_radius", depth_radius, "bias", bias, "alpha",
        alpha, "beta", beta)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; use the Python slow path.
      return lrn_grad_eager_fallback(
          input_grads, input_image, output_image, depth_radius=depth_radius,
          bias=bias, alpha=alpha, beta=beta, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name
      # into the message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4507 
4508 
def lrn_grad_eager_fallback(input_grads, input_image, output_image, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None, ctx=None):
  r"""Slow-path eager executor for lrn_grad.

  Canonicalizes the LRN attrs, coerces the three tensor inputs to a common
  dtype (defaulting to float32), then dispatches the LRNGrad op through the
  generic eager execute path and records the gradient.
  """
  eager_ctx = ctx if ctx else _context.context()
  depth_radius = _execute.make_int(
      5 if depth_radius is None else depth_radius, "depth_radius")
  bias = _execute.make_float(1 if bias is None else bias, "bias")
  alpha = _execute.make_float(1 if alpha is None else alpha, "alpha")
  beta = _execute.make_float(0.5 if beta is None else beta, "beta")
  attr_t, matched = _execute.args_to_matching_eager(
      [input_grads, input_image, output_image], eager_ctx, _dtypes.float32)
  input_grads, input_image, output_image = matched
  flat_inputs = [input_grads, input_image, output_image]
  op_attrs = ("depth_radius", depth_radius, "bias", bias, "alpha", alpha,
              "beta", beta, "T", attr_t)
  outputs = _execute.execute(b"LRNGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("LRNGrad", flat_inputs, op_attrs, outputs, name)
  (result,) = outputs
  return result
4537 
4538 
def log_softmax(logits, name=None):
  r"""Computes log softmax activations.

  For each batch `i` and class `j` we have

      logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))

  Args:
    logits: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      2-D with shape `[batch_size, num_classes]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`.
  """
  _ctx = _context._context
  # Graph mode (no eager context active): build a LogSoftmax node via the
  # op-def library and record its inputs/attrs for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "LogSoftmax", logits=logits, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "LogSoftmax", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path if the fast path rejects these inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "LogSoftmax",
        name, _ctx._post_execution_callbacks, logits)
      return _result
    except _core._FallbackException:
      return log_softmax_eager_fallback(
          logits, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name
      # into the message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4581 
4582 
def log_softmax_eager_fallback(logits, name=None, ctx=None):
  r"""Slow-path eager executor for log_softmax.

  Resolves the dtype attr from `logits`, runs the LogSoftmax op through the
  generic eager execute path, and records the gradient.
  """
  eager_ctx = ctx if ctx else _context.context()
  attr_t, (logits,) = _execute.args_to_matching_eager([logits], eager_ctx)
  flat_inputs = [logits]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"LogSoftmax", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("LogSoftmax", flat_inputs, op_attrs, outputs,
                           name)
  (result,) = outputs
  return result
4597 
4598 
def max_pool(input, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Performs max pooling on the input.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `qint8`.
      4-D input to pool over.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the window for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no eager context active): validate list-typed attrs and
  # normalize all attrs in Python, then build a MaxPool node via the op-def
  # library and record its inputs/attrs for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'max_pool' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'max_pool' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPool", input=input, ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are re-read from the created op so defaulting/validation done by
    # _apply_op_helper is reflected in what gets recorded.
    _attrs = ("T", _op.get_attr("T"), "ksize", _op.get_attr("ksize"),
              "strides", _op.get_attr("strides"), "padding",
              _op.get_attr("padding"), "data_format",
              _op.get_attr("data_format"))
    _execute.record_gradient(
      "MaxPool", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first. The flat argument list
    # (inputs first, then alternating attr name/value pairs) is a fixed
    # positional protocol expected by TFE_Py_FastPathExecute.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MaxPool",
        name, _ctx._post_execution_callbacks, input, "ksize", ksize,
        "strides", strides, "padding", padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; use the Python slow path.
      return max_pool_eager_fallback(
          input, ksize=ksize, strides=strides, padding=padding,
          data_format=data_format, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name
      # into the message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4670 
4671 
def max_pool_eager_fallback(input, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
  r"""Slow-path eager executor for max_pool.

  Validates and canonicalizes the pooling attrs, coerces `input` to a
  matching dtype (defaulting to float32), then dispatches the MaxPool op
  through the generic eager execute path and records the gradient.
  """
  eager_ctx = ctx if ctx else _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool' Op, not %r." % ksize)
  ksize = [_execute.make_int(dim, "ksize") for dim in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool' Op, not %r." % strides)
  strides = [_execute.make_int(dim, "strides") for dim in strides]
  padding = _execute.make_str(padding, "padding")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  attr_t, (input,) = _execute.args_to_matching_eager(
      [input], eager_ctx, _dtypes.float32)
  flat_inputs = [input]
  op_attrs = ("T", attr_t, "ksize", ksize, "strides", strides, "padding",
              padding, "data_format", data_format)
  outputs = _execute.execute(b"MaxPool", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("MaxPool", flat_inputs, op_attrs, outputs, name)
  (result,) = outputs
  return result
4701 
4702 
@tf_export('nn.max_pool3d')
def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
  r"""Performs 3D max pooling on the input.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
    ksize: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The size of the window for each dimension of
      the input tensor. Must have `ksize[0] = ksize[4] = 1`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no eager context active): validate list-typed attrs and
  # normalize all attrs in Python, then build a MaxPool3D node via the
  # op-def library and record its inputs/attrs for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'max_pool3d' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'max_pool3d' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NDHWC"
    data_format = _execute.make_str(data_format, "data_format")
    _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPool3D", input=input, ksize=ksize, strides=strides,
        padding=padding, data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are re-read from the created op so defaulting/validation done by
    # _apply_op_helper is reflected in what gets recorded.
    _attrs = ("ksize", _op.get_attr("ksize"), "strides",
              _op.get_attr("strides"), "padding", _op.get_attr("padding"),
              "data_format", _op.get_attr("data_format"), "T",
              _op.get_attr("T"))
    _execute.record_gradient(
      "MaxPool3D", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first. The flat argument list
    # (inputs first, then alternating attr name/value pairs) is a fixed
    # positional protocol expected by TFE_Py_FastPathExecute.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MaxPool3D",
        name, _ctx._post_execution_callbacks, input, "ksize", ksize,
        "strides", strides, "padding", padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; use the Python slow path.
      return max_pool3d_eager_fallback(
          input, ksize=ksize, strides=strides, padding=padding,
          data_format=data_format, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name
      # into the message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4776 
4777 
def max_pool3d_eager_fallback(input, ksize, strides, padding, data_format="NDHWC", name=None, ctx=None):
  r"""Slow-path eager executor for max_pool3d.

  Validates and canonicalizes the pooling attrs, resolves the dtype attr
  from `input`, then dispatches the MaxPool3D op through the generic eager
  execute path and records the gradient.
  """
  eager_ctx = ctx if ctx else _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool3d' Op, not %r." % ksize)
  ksize = [_execute.make_int(dim, "ksize") for dim in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool3d' Op, not %r." % strides)
  strides = [_execute.make_int(dim, "strides") for dim in strides]
  padding = _execute.make_str(padding, "padding")
  data_format = _execute.make_str(
      "NDHWC" if data_format is None else data_format, "data_format")
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  flat_inputs = [input]
  op_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
              "data_format", data_format, "T", attr_t)
  outputs = _execute.execute(b"MaxPool3D", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("MaxPool3D", flat_inputs, op_attrs, outputs,
                           name)
  (result,) = outputs
  return result
4807 
4808 
def max_pool3d_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NDHWC", name=None):
  r"""Computes gradients of max pooling function.

  Args:
    orig_input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      The original input tensor.
    orig_output: A `Tensor`. Must have the same type as `orig_input`.
      The original output tensor.
    grad: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      Output backprop of shape `[batch, depth, rows, cols, channels]`.
    ksize: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The size of the window for each dimension of
      the input tensor. Must have `ksize[0] = ksize[4] = 1`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `grad`.
  """
  _ctx = _context._context
  # Graph mode (no eager context active): validate list-typed attrs and
  # normalize all attrs in Python, then build a MaxPool3DGrad node via the
  # op-def library and record its inputs/attrs for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'max_pool3d_grad' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'max_pool3d_grad' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NDHWC"
    data_format = _execute.make_str(data_format, "data_format")
    _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPool3DGrad", orig_input=orig_input, orig_output=orig_output,
        grad=grad, ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Note this op carries two dtype attrs: "T" for `grad`/output and
    # "TInput" for the original input/output tensors.
    _attrs = ("ksize", _op.get_attr("ksize"), "strides",
              _op.get_attr("strides"), "padding", _op.get_attr("padding"),
              "data_format", _op.get_attr("data_format"), "T",
              _op.get_attr("T"), "TInput", _op.get_attr("TInput"))
    _execute.record_gradient(
      "MaxPool3DGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first. The flat argument list
    # (inputs first, then alternating attr name/value pairs) is a fixed
    # positional protocol expected by TFE_Py_FastPathExecute.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MaxPool3DGrad", name, _ctx._post_execution_callbacks, orig_input,
        orig_output, grad, "ksize", ksize, "strides", strides, "padding",
        padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; use the Python slow path.
      return max_pool3d_grad_eager_fallback(
          orig_input, orig_output, grad, ksize=ksize, strides=strides,
          padding=padding, data_format=data_format, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name
      # into the message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4887 
4888 
def max_pool3d_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format="NDHWC", name=None, ctx=None):
  r"""Slow-path eager executor for max_pool3d_grad.

  Validates and canonicalizes the pooling attrs, resolves the "T" dtype
  from `grad` and the "TInput" dtype from the original input/output pair
  (both defaulting to float32), then dispatches the MaxPool3DGrad op
  through the generic eager execute path and records the gradient.
  """
  eager_ctx = ctx if ctx else _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool3d_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(dim, "ksize") for dim in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool3d_grad' Op, not %r." % strides)
  strides = [_execute.make_int(dim, "strides") for dim in strides]
  padding = _execute.make_str(padding, "padding")
  data_format = _execute.make_str(
      "NDHWC" if data_format is None else data_format, "data_format")
  attr_t, (grad,) = _execute.args_to_matching_eager(
      [grad], eager_ctx, _dtypes.float32)
  attr_tinput, matched = _execute.args_to_matching_eager(
      [orig_input, orig_output], eager_ctx, _dtypes.float32)
  orig_input, orig_output = matched
  flat_inputs = [orig_input, orig_output, grad]
  op_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
              "data_format", data_format, "T", attr_t, "TInput", attr_tinput)
  outputs = _execute.execute(b"MaxPool3DGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("MaxPool3DGrad", flat_inputs, op_attrs, outputs,
                           name)
  (result,) = outputs
  return result
4920 
4921 
def max_pool3d_grad_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NDHWC", name=None):
  r"""Computes second-order gradients of the maxpooling function.

  Args:
    orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The original input tensor.
    orig_output: A `Tensor`. Must have the same type as `orig_input`.
      The original output tensor.
    grad: A `Tensor`. Must have the same type as `orig_input`.
      Output backprop of shape `[batch, depth, rows, cols, channels]`.
    ksize: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The size of the window for each dimension of
      the input tensor. Must have `ksize[0] = ksize[4] = 1`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `orig_input`.
  """
  _ctx = _context._context
  # Graph mode (no eager context active): validate list-typed attrs and
  # normalize all attrs in Python, then build a MaxPool3DGradGrad node via
  # the op-def library and record its inputs/attrs for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'max_pool3d_grad_grad' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'max_pool3d_grad_grad' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NDHWC"
    data_format = _execute.make_str(data_format, "data_format")
    _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPool3DGradGrad", orig_input=orig_input, orig_output=orig_output,
        grad=grad, ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are re-read from the created op so defaulting/validation done by
    # _apply_op_helper is reflected in what gets recorded.
    _attrs = ("ksize", _op.get_attr("ksize"), "strides",
              _op.get_attr("strides"), "padding", _op.get_attr("padding"),
              "data_format", _op.get_attr("data_format"), "T",
              _op.get_attr("T"))
    _execute.record_gradient(
      "MaxPool3DGradGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first. The flat argument list
    # (inputs first, then alternating attr name/value pairs) is a fixed
    # positional protocol expected by TFE_Py_FastPathExecute.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MaxPool3DGradGrad", name, _ctx._post_execution_callbacks, orig_input,
        orig_output, grad, "ksize", ksize, "strides", strides, "padding",
        padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; use the Python slow path.
      return max_pool3d_grad_grad_eager_fallback(
          orig_input, orig_output, grad, ksize=ksize, strides=strides,
          padding=padding, data_format=data_format, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name
      # into the message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5000 
5001 
def max_pool3d_grad_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format="NDHWC", name=None, ctx=None):
  r"""Slow-path eager executor for max_pool3d_grad_grad.

  Validates and canonicalizes the pooling attrs, coerces the three tensor
  inputs to one dtype, then dispatches the MaxPool3DGradGrad op through the
  generic eager execute path and records the gradient.
  """
  eager_ctx = ctx if ctx else _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool3d_grad_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(dim, "ksize") for dim in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool3d_grad_grad' Op, not %r." % strides)
  strides = [_execute.make_int(dim, "strides") for dim in strides]
  padding = _execute.make_str(padding, "padding")
  data_format = _execute.make_str(
      "NDHWC" if data_format is None else data_format, "data_format")
  attr_t, matched = _execute.args_to_matching_eager(
      [orig_input, orig_output, grad], eager_ctx)
  orig_input, orig_output, grad = matched
  flat_inputs = [orig_input, orig_output, grad]
  op_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
              "data_format", data_format, "T", attr_t)
  outputs = _execute.execute(b"MaxPool3DGradGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("MaxPool3DGradGrad", flat_inputs, op_attrs,
                           outputs, name)
  (result,) = outputs
  return result
5032 
5033 
def max_pool_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Computes gradients of the maxpooling function.

  Args:
    orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The original input tensor.
    orig_output: A `Tensor`. Must have the same type as `orig_input`.
      The original output tensor.
    grad: A `Tensor`. Must have the same type as `orig_input`.
      4-D.  Gradients w.r.t. the output of `max_pool`.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the window for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `orig_input`.
  """
  _ctx = _context._context
  # Graph mode (no eager context active): validate list-typed attrs and
  # normalize all attrs in Python, then build a MaxPoolGrad node via the
  # op-def library and record its inputs/attrs for gradient computation.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'max_pool_grad' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'max_pool_grad' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolGrad", orig_input=orig_input, orig_output=orig_output,
        grad=grad, ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are re-read from the created op so defaulting/validation done by
    # _apply_op_helper is reflected in what gets recorded.
    _attrs = ("ksize", _op.get_attr("ksize"), "strides",
              _op.get_attr("strides"), "padding", _op.get_attr("padding"),
              "data_format", _op.get_attr("data_format"), "T",
              _op.get_attr("T"))
    _execute.record_gradient(
      "MaxPoolGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first. The flat argument list
    # (inputs first, then alternating attr name/value pairs) is a fixed
    # positional protocol expected by TFE_Py_FastPathExecute.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MaxPoolGrad",
        name, _ctx._post_execution_callbacks, orig_input, orig_output, grad,
        "ksize", ksize, "strides", strides, "padding", padding, "data_format",
        data_format)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; use the Python slow path.
      return max_pool_grad_eager_fallback(
          orig_input, orig_output, grad, ksize=ksize, strides=strides,
          padding=padding, data_format=data_format, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name
      # into the message for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5111 
5112 
def max_pool_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
  r"""Slow-path eager executor for max_pool_grad.

  Validates and canonicalizes the pooling attrs, coerces the three tensor
  inputs to one dtype (defaulting to float32), then dispatches the
  MaxPoolGrad op through the generic eager execute path and records the
  gradient.
  """
  eager_ctx = ctx if ctx else _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(dim, "ksize") for dim in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_grad' Op, not %r." % strides)
  strides = [_execute.make_int(dim, "strides") for dim in strides]
  padding = _execute.make_str(padding, "padding")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  attr_t, matched = _execute.args_to_matching_eager(
      [orig_input, orig_output, grad], eager_ctx, _dtypes.float32)
  orig_input, orig_output, grad = matched
  flat_inputs = [orig_input, orig_output, grad]
  op_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
              "data_format", data_format, "T", attr_t)
  outputs = _execute.execute(b"MaxPoolGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("MaxPoolGrad", flat_inputs, op_attrs, outputs,
                           name)
  (result,) = outputs
  return result
5143 
5144 
def max_pool_grad_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Computes second-order gradients of the maxpooling function.

  Args:
    orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The original input tensor.
    orig_output: A `Tensor`. Must have the same type as `orig_input`.
      The original output tensor.
    grad: A `Tensor`. Must have the same type as `orig_input`.
      4-D.  Gradients of gradients w.r.t. the input of `max_pool`.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the window for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `orig_input`.
  """
  # Dispatch on execution mode: build a graph op when no context exists or
  # eager execution is off; otherwise try the eager fast path.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # --- Graph mode ---
    # ksize/strides must be lists (or tuples); coerce every element to int.
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'max_pool_grad_grad' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'max_pool_grad_grad' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    # data_format is an optional attr; fill in the documented default.
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    # Create the op through the op-def library so standard graph-mode
    # attr/type validation applies.
    _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolGradGrad", orig_input=orig_input, orig_output=orig_output,
        grad=grad, ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read the canonicalized attrs off the created op; the tuple is a
    # flat, order-sensitive (name, value, ...) sequence.
    _attrs = ("ksize", _op.get_attr("ksize"), "strides",
              _op.get_attr("strides"), "padding", _op.get_attr("padding"),
              "data_format", _op.get_attr("data_format"), "T",
              _op.get_attr("T"))
    _execute.record_gradient(
      "MaxPoolGradGrad", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result

  else:
    # --- Eager mode ---
    try:
      # Fast path: call straight into the C extension. Inputs come first,
      # then interleaved attr name/value pairs — all positional.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MaxPoolGradGrad", name, _ctx._post_execution_callbacks, orig_input,
        orig_output, grad, "ksize", ksize, "strides", strides, "padding",
        padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      # The fast path rejected the call; retry via the Python slow path.
      return max_pool_grad_grad_eager_fallback(
          orig_input, orig_output, grad, ksize=ksize, strides=strides,
          padding=padding, data_format=data_format, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert a non-OK C++ status into a Python exception, tagging the
      # message with the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5222 
5223 
def max_pool_grad_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function max_pool_grad_grad

  Validates and coerces the attributes in Python, resolves the common
  dtype `T` of the three tensor inputs, then executes the MaxPoolGradGrad
  op through the generic eager executor and records it for gradient
  computation.
  """
  # Use the caller-supplied eager context when given, else the global one.
  _ctx = ctx if ctx else _context.context()
  # ksize/strides must be lists (or tuples); coerce every element to int.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_grad_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_grad_grad' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  # data_format is an optional attr; fill in the documented default.
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  # Convert the three inputs to eager tensors sharing one dtype T (no
  # default dtype here — it must be inferable from the inputs).
  _attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], _ctx)
  (orig_input, orig_output, grad) = _inputs_T
  _inputs_flat = [orig_input, orig_output, grad]
  # Attrs travel as a flat, order-sensitive (name, value, ...) tuple.
  _attrs = ("ksize", ksize, "strides", strides, "padding", padding,
  "data_format", data_format, "T", _attr_T)
  # MaxPoolGradGrad produces exactly one output tensor.
  _result = _execute.execute(b"MaxPoolGradGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MaxPoolGradGrad", _inputs_flat, _attrs, _result, name)
  # Unpack the single-element result list.
  _result, = _result
  return _result
5254 
5255 
def max_pool_grad_grad_v2(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Computes second-order gradients of the maxpooling function.

  Args:
    orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The original input tensor.
    orig_output: A `Tensor`. Must have the same type as `orig_input`.
      The original output tensor.
    grad: A `Tensor`. Must have the same type as `orig_input`.
      4-D.  Gradients of gradients w.r.t. the input of `max_pool`.
    ksize: A `Tensor` of type `int32`.
      The size of the window for each dimension of the input tensor.
    strides: A `Tensor` of type `int32`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `orig_input`.
  """
  # Dispatch on execution mode: build a graph op when no context exists or
  # eager execution is off; otherwise try the eager fast path.
  # NOTE: unlike the non-V2 op, ksize/strides are tensor INPUTS here, not
  # attrs, so no Python-side list validation is needed.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # --- Graph mode ---
    padding = _execute.make_str(padding, "padding")
    # data_format is an optional attr; fill in the documented default.
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    # Create the op through the op-def library so standard graph-mode
    # attr/type validation applies.
    _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolGradGradV2", orig_input=orig_input, orig_output=orig_output,
        grad=grad, ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read the canonicalized attrs off the created op; the tuple is a
    # flat, order-sensitive (name, value, ...) sequence.
    _attrs = ("padding", _op.get_attr("padding"), "data_format",
              _op.get_attr("data_format"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "MaxPoolGradGradV2", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result

  else:
    # --- Eager mode ---
    try:
      # Fast path: call straight into the C extension. Inputs (including
      # the ksize/strides tensors) come first, then interleaved attr
      # name/value pairs — all positional.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MaxPoolGradGradV2", name, _ctx._post_execution_callbacks, orig_input,
        orig_output, grad, ksize, strides, "padding", padding, "data_format",
        data_format)
      return _result
    except _core._FallbackException:
      # The fast path rejected the call; retry via the Python slow path.
      return max_pool_grad_grad_v2_eager_fallback(
          orig_input, orig_output, grad, ksize, strides, padding=padding,
          data_format=data_format, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert a non-OK C++ status into a Python exception, tagging the
      # message with the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5321 
5322 
def max_pool_grad_grad_v2_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function max_pool_grad_grad_v2

  Coerces the attrs, resolves the common dtype `T` of the three tensor
  inputs, converts the ksize/strides inputs to int32 tensors, then
  executes MaxPoolGradGradV2 through the generic eager executor and
  records it for gradient computation.
  """
  # Use the caller-supplied eager context when given, else the global one.
  _ctx = ctx if ctx else _context.context()
  padding = _execute.make_str(padding, "padding")
  # data_format is an optional attr; fill in the documented default.
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  # Convert the three main inputs to eager tensors sharing one dtype T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], _ctx)
  (orig_input, orig_output, grad) = _inputs_T
  # In the V2 op, ksize/strides are int32 tensor inputs rather than attrs.
  ksize = _ops.convert_to_tensor(ksize, _dtypes.int32)
  strides = _ops.convert_to_tensor(strides, _dtypes.int32)
  _inputs_flat = [orig_input, orig_output, grad, ksize, strides]
  # Attrs travel as a flat, order-sensitive (name, value, ...) tuple.
  _attrs = ("padding", padding, "data_format", data_format, "T", _attr_T)
  # MaxPoolGradGradV2 produces exactly one output tensor.
  _result = _execute.execute(b"MaxPoolGradGradV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MaxPoolGradGradV2", _inputs_flat, _attrs, _result, name)
  # Unpack the single-element result list.
  _result, = _result
  return _result
5344 
5345 
def max_pool_grad_grad_with_argmax(input, grad, argmax, ksize, strides, padding, name=None):
  r"""Computes second-order gradients of the maxpooling function.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The original input.
    grad: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
      input of `max_pool`.
    argmax: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The indices of the maximum values chosen for each output of `max_pool`.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the window for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Dispatch on execution mode: build a graph op when no context exists or
  # eager execution is off; otherwise try the eager fast path.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # --- Graph mode ---
    # ksize/strides must be lists (or tuples); coerce every element to int.
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'max_pool_grad_grad_with_argmax' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'max_pool_grad_grad_with_argmax' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    # Create the op through the op-def library so standard graph-mode
    # attr/type validation applies.
    _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolGradGradWithArgmax", input=input, grad=grad, argmax=argmax,
        ksize=ksize, strides=strides, padding=padding, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read the canonicalized attrs off the created op; the tuple is a
    # flat, order-sensitive (name, value, ...) sequence.
    _attrs = ("ksize", _op.get_attr("ksize"), "strides",
              _op.get_attr("strides"), "padding", _op.get_attr("padding"),
              "Targmax", _op.get_attr("Targmax"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "MaxPoolGradGradWithArgmax", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result

  else:
    # --- Eager mode ---
    try:
      # Fast path: call straight into the C extension. Inputs come first,
      # then interleaved attr name/value pairs — all positional.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MaxPoolGradGradWithArgmax", name, _ctx._post_execution_callbacks,
        input, grad, argmax, "ksize", ksize, "strides", strides, "padding",
        padding)
      return _result
    except _core._FallbackException:
      # The fast path rejected the call; retry via the Python slow path.
      return max_pool_grad_grad_with_argmax_eager_fallback(
          input, grad, argmax, ksize=ksize, strides=strides, padding=padding,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert a non-OK C++ status into a Python exception, tagging the
      # message with the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5413 
5414 
def max_pool_grad_grad_with_argmax_eager_fallback(input, grad, argmax, ksize, strides, padding, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function max_pool_grad_grad_with_argmax

  Validates and coerces the attrs, infers `Targmax` from the argmax
  tensor and the common dtype `T` from input/grad, then executes
  MaxPoolGradGradWithArgmax through the generic eager executor and
  records it for gradient computation.
  """
  # Use the caller-supplied eager context when given, else the global one.
  _ctx = ctx if ctx else _context.context()
  # ksize/strides must be lists (or tuples); coerce every element to int.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_grad_grad_with_argmax' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_grad_grad_with_argmax' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  # Targmax is inferred from the argmax tensor's dtype; T from input/grad,
  # which must share one dtype.
  _attr_Targmax, (argmax,) = _execute.args_to_matching_eager([argmax], _ctx)
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, grad], _ctx)
  (input, grad) = _inputs_T
  _inputs_flat = [input, grad, argmax]
  # Attrs travel as a flat, order-sensitive (name, value, ...) tuple.
  _attrs = ("ksize", ksize, "strides", strides, "padding", padding, "Targmax",
  _attr_Targmax, "T", _attr_T)
  # MaxPoolGradGradWithArgmax produces exactly one output tensor.
  _result = _execute.execute(b"MaxPoolGradGradWithArgmax", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "MaxPoolGradGradWithArgmax", _inputs_flat, _attrs, _result, name)
  # Unpack the single-element result list.
  _result, = _result
  return _result
5444 
5445 
def max_pool_grad_v2(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Computes gradients of the maxpooling function.

  Args:
    orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The original input tensor.
    orig_output: A `Tensor`. Must have the same type as `orig_input`.
      The original output tensor.
    grad: A `Tensor`. Must have the same type as `orig_input`.
      4-D.  Gradients w.r.t. the output of `max_pool`.
    ksize: A `Tensor` of type `int32`.
      The size of the window for each dimension of the input tensor.
    strides: A `Tensor` of type `int32`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `orig_input`.
  """
  # Dispatch on execution mode: build a graph op when no context exists or
  # eager execution is off; otherwise try the eager fast path.
  # NOTE: ksize/strides are tensor INPUTS in the V2 op, not attrs, so no
  # Python-side list validation is needed here.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # --- Graph mode ---
    padding = _execute.make_str(padding, "padding")
    # data_format is an optional attr; fill in the documented default.
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    # Create the op through the op-def library so standard graph-mode
    # attr/type validation applies.
    _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolGradV2", orig_input=orig_input, orig_output=orig_output,
        grad=grad, ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read the canonicalized attrs off the created op; the tuple is a
    # flat, order-sensitive (name, value, ...) sequence.
    _attrs = ("padding", _op.get_attr("padding"), "data_format",
              _op.get_attr("data_format"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "MaxPoolGradV2", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result

  else:
    # --- Eager mode ---
    try:
      # Fast path: call straight into the C extension. Inputs (including
      # the ksize/strides tensors) come first, then interleaved attr
      # name/value pairs — all positional.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MaxPoolGradV2", name, _ctx._post_execution_callbacks, orig_input,
        orig_output, grad, ksize, strides, "padding", padding, "data_format",
        data_format)
      return _result
    except _core._FallbackException:
      # The fast path rejected the call; retry via the Python slow path.
      return max_pool_grad_v2_eager_fallback(
          orig_input, orig_output, grad, ksize, strides, padding=padding,
          data_format=data_format, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert a non-OK C++ status into a Python exception, tagging the
      # message with the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5511 
5512 
def max_pool_grad_v2_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function max_pool_grad_v2

  Coerces the attrs, resolves the common dtype `T` of the three tensor
  inputs (defaulting to float32), converts ksize/strides to int32
  tensors, then executes MaxPoolGradV2 through the generic eager
  executor and records it for gradient computation.
  """
  # Use the caller-supplied eager context when given, else the global one.
  _ctx = ctx if ctx else _context.context()
  padding = _execute.make_str(padding, "padding")
  # data_format is an optional attr; fill in the documented default.
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  # Convert the three main inputs to eager tensors sharing one dtype T,
  # preferring float32 when no dtype can be inferred from the values.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], _ctx, _dtypes.float32)
  (orig_input, orig_output, grad) = _inputs_T
  # In the V2 op, ksize/strides are int32 tensor inputs rather than attrs.
  ksize = _ops.convert_to_tensor(ksize, _dtypes.int32)
  strides = _ops.convert_to_tensor(strides, _dtypes.int32)
  _inputs_flat = [orig_input, orig_output, grad, ksize, strides]
  # Attrs travel as a flat, order-sensitive (name, value, ...) tuple.
  _attrs = ("padding", padding, "data_format", data_format, "T", _attr_T)
  # MaxPoolGradV2 produces exactly one output tensor.
  _result = _execute.execute(b"MaxPoolGradV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MaxPoolGradV2", _inputs_flat, _attrs, _result, name)
  # Unpack the single-element result list.
  _result, = _result
  return _result
5534 
5535 
def max_pool_grad_with_argmax(input, grad, argmax, ksize, strides, padding, name=None):
  r"""Computes gradients of the maxpooling function.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The original input.
    grad: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
      output of `max_pool`.
    argmax: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The indices of the maximum values chosen for each output of `max_pool`.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the window for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Dispatch on execution mode: build a graph op when no context exists or
  # eager execution is off; otherwise try the eager fast path.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # --- Graph mode ---
    # ksize/strides must be lists (or tuples); coerce every element to int.
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'max_pool_grad_with_argmax' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'max_pool_grad_with_argmax' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    # Create the op through the op-def library so standard graph-mode
    # attr/type validation applies.
    _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolGradWithArgmax", input=input, grad=grad, argmax=argmax,
        ksize=ksize, strides=strides, padding=padding, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read the canonicalized attrs off the created op; the tuple is a
    # flat, order-sensitive (name, value, ...) sequence.
    _attrs = ("ksize", _op.get_attr("ksize"), "strides",
              _op.get_attr("strides"), "padding", _op.get_attr("padding"),
              "Targmax", _op.get_attr("Targmax"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "MaxPoolGradWithArgmax", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result

  else:
    # --- Eager mode ---
    try:
      # Fast path: call straight into the C extension. Inputs come first,
      # then interleaved attr name/value pairs — all positional.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MaxPoolGradWithArgmax", name, _ctx._post_execution_callbacks, input,
        grad, argmax, "ksize", ksize, "strides", strides, "padding", padding)
      return _result
    except _core._FallbackException:
      # The fast path rejected the call; retry via the Python slow path.
      return max_pool_grad_with_argmax_eager_fallback(
          input, grad, argmax, ksize=ksize, strides=strides, padding=padding,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert a non-OK C++ status into a Python exception, tagging the
      # message with the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5602 
5603 
def max_pool_grad_with_argmax_eager_fallback(input, grad, argmax, ksize, strides, padding, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function max_pool_grad_with_argmax

  Validates and coerces the attrs, infers `Targmax` from the argmax
  tensor and the common dtype `T` from input/grad, then executes
  MaxPoolGradWithArgmax through the generic eager executor and records
  it for gradient computation.
  """
  # Use the caller-supplied eager context when given, else the global one.
  _ctx = ctx if ctx else _context.context()
  # ksize/strides must be lists (or tuples); coerce every element to int.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_grad_with_argmax' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_grad_with_argmax' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  # Targmax is inferred from the argmax tensor's dtype; T from input/grad,
  # which must share one dtype.
  _attr_Targmax, (argmax,) = _execute.args_to_matching_eager([argmax], _ctx)
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, grad], _ctx)
  (input, grad) = _inputs_T
  _inputs_flat = [input, grad, argmax]
  # Attrs travel as a flat, order-sensitive (name, value, ...) tuple.
  _attrs = ("ksize", ksize, "strides", strides, "padding", padding, "Targmax",
  _attr_Targmax, "T", _attr_T)
  # MaxPoolGradWithArgmax produces exactly one output tensor.
  _result = _execute.execute(b"MaxPoolGradWithArgmax", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MaxPoolGradWithArgmax", _inputs_flat, _attrs, _result, name)
  # Unpack the single-element result list.
  _result, = _result
  return _result
5632 
5633 
def max_pool_v2(input, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Performs max pooling on the input.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `qint8`.
      4-D input to pool over.
    ksize: A `Tensor` of type `int32`.
      The size of the window for each dimension of the input tensor.
    strides: A `Tensor` of type `int32`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Dispatch on execution mode: build a graph op when no context exists or
  # eager execution is off; otherwise try the eager fast path.
  # NOTE: ksize/strides are tensor INPUTS in the V2 op, not attrs, so no
  # Python-side list validation is needed here.
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # --- Graph mode ---
    padding = _execute.make_str(padding, "padding")
    # data_format is an optional attr; fill in the documented default.
    if data_format is None:
      data_format = "NHWC"
    data_format = _execute.make_str(data_format, "data_format")
    # Create the op through the op-def library so standard graph-mode
    # attr/type validation applies.
    _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolV2", input=input, ksize=ksize, strides=strides,
        padding=padding, data_format=data_format, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Re-read the canonicalized attrs off the created op; the tuple is a
    # flat, order-sensitive (name, value, ...) sequence.
    _attrs = ("T", _op.get_attr("T"), "padding", _op.get_attr("padding"),
              "data_format", _op.get_attr("data_format"))
    _execute.record_gradient(
      "MaxPoolV2", _inputs_flat, _attrs, _result, name)
    # Unpack the single output tensor.
    _result, = _result
    return _result

  else:
    # --- Eager mode ---
    try:
      # Fast path: call straight into the C extension. Inputs (including
      # the ksize/strides tensors) come first, then interleaved attr
      # name/value pairs — all positional.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MaxPoolV2",
        name, _ctx._post_execution_callbacks, input, ksize, strides,
        "padding", padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      # The fast path rejected the call; retry via the Python slow path.
      return max_pool_v2_eager_fallback(
          input, ksize, strides, padding=padding, data_format=data_format,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert a non-OK C++ status into a Python exception, tagging the
      # message with the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5693 
5694 
def max_pool_v2_eager_fallback(input, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function max_pool_v2

  Coerces the attrs, resolves the input's dtype `T` (defaulting to
  float32), converts ksize/strides to int32 tensors, then executes
  MaxPoolV2 through the generic eager executor and records it for
  gradient computation.
  """
  # Use the caller-supplied eager context when given, else the global one.
  _ctx = ctx if ctx else _context.context()
  padding = _execute.make_str(padding, "padding")
  # data_format is an optional attr; fill in the documented default.
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  # Convert the pooled input to an eager tensor; T defaults to float32
  # when no dtype can be inferred from the value.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.float32)
  # In the V2 op, ksize/strides are int32 tensor inputs rather than attrs.
  ksize = _ops.convert_to_tensor(ksize, _dtypes.int32)
  strides = _ops.convert_to_tensor(strides, _dtypes.int32)
  _inputs_flat = [input, ksize, strides]
  # Attrs travel as a flat, order-sensitive (name, value, ...) tuple.
  _attrs = ("T", _attr_T, "padding", padding, "data_format", data_format)
  # MaxPoolV2 produces exactly one output tensor.
  _result = _execute.execute(b"MaxPoolV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MaxPoolV2", _inputs_flat, _attrs, _result, name)
  # Unpack the single-element result list.
  _result, = _result
  return _result
5715 
5716 
5717 _max_pool_with_argmax_outputs = ["output", "argmax"]
5718 _MaxPoolWithArgmaxOutput = _collections.namedtuple(
5719     "MaxPoolWithArgmax", _max_pool_with_argmax_outputs)
5720 
5721 
@tf_export('nn.max_pool_with_argmax')
def max_pool_with_argmax(input, ksize, strides, padding, Targmax=_dtypes.int64, name=None):
  r"""Performs max pooling on the input and outputs both max values and indices.

  The indices in `argmax` are flattened, so that a maximum value at position
  `[b, y, x, c]` becomes flattened index
  `((b * height + y) * width + x) * channels + c`.

  The indices returned are always in `[0, height) x [0, width)` before flattening,
  even if padding is involved and the mathematically correct answer is outside
  (either negative or too large).  This is a bug, but fixing it is difficult to do
  in a safe backwards compatible way, especially due to flattening.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      4-D with shape `[batch, height, width, channels]`.  Input to pool over.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the window for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    Targmax: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, argmax).

    output: A `Tensor`. Has the same type as `input`.
    argmax: A `Tensor` of type `Targmax`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): validate attributes in Python and
  # build a symbolic MaxPoolWithArgmax op through the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'max_pool_with_argmax' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'max_pool_with_argmax' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if Targmax is None:
      Targmax = _dtypes.int64
    Targmax = _execute.make_type(Targmax, "Targmax")
    _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolWithArgmax", input=input, ksize=ksize, strides=strides,
        padding=padding, Targmax=Targmax, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Read the attrs back from the created op so the recorded values match
    # exactly what the op was built with.
    _attrs = ("ksize", _op.get_attr("ksize"), "strides",
              _op.get_attr("strides"), "Targmax", _op.get_attr("Targmax"),
              "padding", _op.get_attr("padding"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "MaxPoolWithArgmax", _inputs_flat, _attrs, _result, name)
    _result = _MaxPoolWithArgmaxOutput._make(_result)
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ execution routine.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "MaxPoolWithArgmax", name, _ctx._post_execution_callbacks, input,
        "ksize", ksize, "strides", strides, "Targmax", Targmax, "padding",
        padding)
      _result = _MaxPoolWithArgmaxOutput._make(_result)
      return _result
    except _core._FallbackException:
      # The fast path rejected these arguments; retry through the Python
      # slowpath implementation.
      return max_pool_with_argmax_eager_fallback(
          input, ksize=ksize, strides=strides, Targmax=Targmax,
          padding=padding, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ error status into the corresponding Python
      # exception, appending the op name (when given) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5802 
5803 
def max_pool_with_argmax_eager_fallback(input, ksize, strides, padding, Targmax=_dtypes.int64, name=None, ctx=None):
  r"""Eager-mode slowpath for max_pool_with_argmax.

  Validates the attributes, resolves the input dtype, executes the
  MaxPoolWithArgmax op, records the gradient, and returns the structured
  (output, argmax) result.
  """
  eager_ctx = ctx or _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_with_argmax' Op, not %r." % ksize)
  ksize = [_execute.make_int(item, "ksize") for item in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_with_argmax' Op, not %r." % strides)
  strides = [_execute.make_int(item, "strides") for item in strides]
  padding = _execute.make_str(padding, "padding")
  Targmax = _execute.make_type(
      _dtypes.int64 if Targmax is None else Targmax, "Targmax")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  flat_inputs = [input]
  op_attrs = ("ksize", ksize, "strides", strides, "Targmax", Targmax,
              "padding", padding, "T", _attr_T)
  outputs = _execute.execute(b"MaxPoolWithArgmax", 2, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("MaxPoolWithArgmax", flat_inputs, op_attrs,
                           outputs, name)
  return _MaxPoolWithArgmaxOutput._make(outputs)
5833 
5834 
def nth_element(input, n, reverse=False, name=None):
  r"""Finds values of the `n`-th order statistic for the last dimension.

  If the input is a vector (rank-1), finds the entries which is the nth-smallest
  value in the vector and outputs their values as scalar tensor.

  For matrices (resp. higher rank input), computes the entries which is the
  nth-smallest value in each row (resp. vector along the last dimension). Thus,

      values.shape = input.shape[:-1]

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      1-D or higher with last dimension at least `n+1`.
    n: A `Tensor` of type `int32`.
      0-D. Position of sorted vector to select along the last dimension (along
      each row for matrices). Valid range of n is `[0, input.shape[:-1])`
    reverse: An optional `bool`. Defaults to `False`.
      When set to True, find the nth-largest value in the vector and vice
      versa.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): normalize the attribute and build
  # a symbolic NthElement op through the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if reverse is None:
      reverse = False
    reverse = _execute.make_bool(reverse, "reverse")
    _, _, _op = _op_def_lib._apply_op_helper(
        "NthElement", input=input, n=n, reverse=reverse, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("reverse", _op.get_attr("reverse"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "NthElement", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one tensor from the outputs list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ execution routine.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "NthElement",
        name, _ctx._post_execution_callbacks, input, n, "reverse", reverse)
      return _result
    except _core._FallbackException:
      # The fast path rejected these arguments; retry through the Python
      # slowpath implementation.
      return nth_element_eager_fallback(
          input, n, reverse=reverse, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ error status into the corresponding Python
      # exception, appending the op name (when given) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5890 
5891 
def nth_element_eager_fallback(input, n, reverse=False, name=None, ctx=None):
  r"""Eager-mode slowpath for nth_element.

  Normalizes the `reverse` attribute, converts the inputs to tensors,
  executes the NthElement op, records the gradient, and returns the single
  output tensor.
  """
  eager_ctx = ctx or _context.context()
  reverse = _execute.make_bool(
      False if reverse is None else reverse, "reverse")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  flat_inputs = [input, _ops.convert_to_tensor(n, _dtypes.int32)]
  op_attrs = ("reverse", reverse, "T", _attr_T)
  outputs = _execute.execute(b"NthElement", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("NthElement", flat_inputs, op_attrs,
                           outputs, name)
  (single_output,) = outputs
  return single_output
5910 
5911 
# Field names for the QuantizedAvgPool op's structured result.
_quantized_avg_pool_outputs = ["output", "min_output", "max_output"]
# Named tuple returned by quantized_avg_pool:
# (output, min_output, max_output).
_QuantizedAvgPoolOutput = _collections.namedtuple(
    "QuantizedAvgPool", _quantized_avg_pool_outputs)
5915 
5916 
@tf_export('nn.quantized_avg_pool')
def quantized_avg_pool(input, min_input, max_input, ksize, strides, padding, name=None):
  r"""Produces the average pool of the input tensor for quantized types.

  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      4-D with shape `[batch, height, width, channels]`.
    min_input: A `Tensor` of type `float32`.
      The float value that the lowest quantized input value represents.
    max_input: A `Tensor` of type `float32`.
      The float value that the highest quantized input value represents.
    ksize: A list of `ints`.
      The size of the window for each dimension of the input tensor.
      The length must be 4 to match the number of dimensions of the input.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      tensor.  The length must be 4 to match the number of dimensions of the input.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).

    output: A `Tensor`. Has the same type as `input`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): validate attributes in Python and
  # build a symbolic QuantizedAvgPool op through the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'quantized_avg_pool' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'quantized_avg_pool' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedAvgPool", input=input, min_input=min_input,
        max_input=max_input, ksize=ksize, strides=strides, padding=padding,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Read the attrs back from the created op so the recorded values match
    # exactly what the op was built with.
    _attrs = ("T", _op.get_attr("T"), "ksize", _op.get_attr("ksize"),
              "strides", _op.get_attr("strides"), "padding",
              _op.get_attr("padding"))
    _execute.record_gradient(
      "QuantizedAvgPool", _inputs_flat, _attrs, _result, name)
    _result = _QuantizedAvgPoolOutput._make(_result)
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ execution routine.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizedAvgPool", name, _ctx._post_execution_callbacks, input,
        min_input, max_input, "ksize", ksize, "strides", strides, "padding",
        padding)
      _result = _QuantizedAvgPoolOutput._make(_result)
      return _result
    except _core._FallbackException:
      # The fast path rejected these arguments; retry through the Python
      # slowpath implementation.
      return quantized_avg_pool_eager_fallback(
          input, min_input, max_input, ksize=ksize, strides=strides,
          padding=padding, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ error status into the corresponding Python
      # exception, appending the op name (when given) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5991 
5992 
def quantized_avg_pool_eager_fallback(input, min_input, max_input, ksize, strides, padding, name=None, ctx=None):
  r"""Eager-mode slowpath for quantized_avg_pool.

  Validates the attributes, converts the inputs to tensors, executes the
  QuantizedAvgPool op, records the gradient, and returns the structured
  (output, min_output, max_output) result.
  """
  eager_ctx = ctx or _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'quantized_avg_pool' Op, not %r." % ksize)
  ksize = [_execute.make_int(item, "ksize") for item in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_avg_pool' Op, not %r." % strides)
  strides = [_execute.make_int(item, "strides") for item in strides]
  padding = _execute.make_str(padding, "padding")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  flat_inputs = [input, min_input, max_input]
  op_attrs = ("T", _attr_T, "ksize", ksize, "strides", strides,
              "padding", padding)
  outputs = _execute.execute(b"QuantizedAvgPool", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("QuantizedAvgPool", flat_inputs, op_attrs,
                           outputs, name)
  return _QuantizedAvgPoolOutput._make(outputs)
6021 
6022 
# Field names for the QuantizedBatchNormWithGlobalNormalization op's
# structured result.
_quantized_batch_norm_with_global_normalization_outputs = ["result",
                                                          "result_min",
                                                          "result_max"]
# Named tuple returned by quantized_batch_norm_with_global_normalization:
# (result, result_min, result_max).
_QuantizedBatchNormWithGlobalNormalizationOutput = _collections.namedtuple(
    "QuantizedBatchNormWithGlobalNormalization",
    _quantized_batch_norm_with_global_normalization_outputs)
6029 
6030 
def quantized_batch_norm_with_global_normalization(t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, out_type, variance_epsilon, scale_after_normalization, name=None):
  r"""Quantized Batch normalization.

  This op is deprecated and will be removed in the future. Prefer
  `tf.nn.batch_normalization`.

  Args:
    t: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      A 4D input Tensor.
    t_min: A `Tensor` of type `float32`.
      The value represented by the lowest quantized input.
    t_max: A `Tensor` of type `float32`.
      The value represented by the highest quantized input.
    m: A `Tensor`. Must have the same type as `t`.
      A 1D mean Tensor with size matching the last dimension of t.
      This is the first output from tf.nn.moments,
      or a saved moving average thereof.
    m_min: A `Tensor` of type `float32`.
      The value represented by the lowest quantized mean.
    m_max: A `Tensor` of type `float32`.
      The value represented by the highest quantized mean.
    v: A `Tensor`. Must have the same type as `t`.
      A 1D variance Tensor with size matching the last dimension of t.
      This is the second output from tf.nn.moments,
      or a saved moving average thereof.
    v_min: A `Tensor` of type `float32`.
      The value represented by the lowest quantized variance.
    v_max: A `Tensor` of type `float32`.
      The value represented by the highest quantized variance.
    beta: A `Tensor`. Must have the same type as `t`.
      A 1D beta Tensor with size matching the last dimension of t.
      An offset to be added to the normalized tensor.
    beta_min: A `Tensor` of type `float32`.
      The value represented by the lowest quantized offset.
    beta_max: A `Tensor` of type `float32`.
      The value represented by the highest quantized offset.
    gamma: A `Tensor`. Must have the same type as `t`.
      A 1D gamma Tensor with size matching the last dimension of t.
      If "scale_after_normalization" is true, this tensor will be multiplied
      with the normalized tensor.
    gamma_min: A `Tensor` of type `float32`.
      The value represented by the lowest quantized gamma.
    gamma_max: A `Tensor` of type `float32`.
      The value represented by the highest quantized gamma.
    out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`.
    variance_epsilon: A `float`. A small float number to avoid dividing by 0.
    scale_after_normalization: A `bool`.
      A bool indicating whether the resulted tensor
      needs to be multiplied with gamma.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (result, result_min, result_max).

    result: A `Tensor` of type `out_type`.
    result_min: A `Tensor` of type `float32`.
    result_max: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): normalize the attributes and
  # build a symbolic op through the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    out_type = _execute.make_type(out_type, "out_type")
    variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
    scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedBatchNormWithGlobalNormalization", t=t, t_min=t_min,
        t_max=t_max, m=m, m_min=m_min, m_max=m_max, v=v, v_min=v_min,
        v_max=v_max, beta=beta, beta_min=beta_min, beta_max=beta_max,
        gamma=gamma, gamma_min=gamma_min, gamma_max=gamma_max,
        out_type=out_type, variance_epsilon=variance_epsilon,
        scale_after_normalization=scale_after_normalization, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Read the attrs back from the created op so the recorded values match
    # exactly what the op was built with.
    _attrs = ("Tinput", _op.get_attr("Tinput"), "out_type",
              _op.get_attr("out_type"), "variance_epsilon",
              _op.get_attr("variance_epsilon"), "scale_after_normalization",
              _op.get_attr("scale_after_normalization"))
    _execute.record_gradient(
      "QuantizedBatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result, name)
    _result = _QuantizedBatchNormWithGlobalNormalizationOutput._make(_result)
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ execution routine.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizedBatchNormWithGlobalNormalization", name,
        _ctx._post_execution_callbacks, t, t_min, t_max, m, m_min, m_max, v,
        v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max,
        "out_type", out_type, "variance_epsilon", variance_epsilon,
        "scale_after_normalization", scale_after_normalization)
      _result = _QuantizedBatchNormWithGlobalNormalizationOutput._make(_result)
      return _result
    except _core._FallbackException:
      # The fast path rejected these arguments; retry through the Python
      # slowpath implementation.
      return quantized_batch_norm_with_global_normalization_eager_fallback(
          t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min,
          beta_max, gamma, gamma_min, gamma_max, out_type=out_type,
          variance_epsilon=variance_epsilon,
          scale_after_normalization=scale_after_normalization, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ error status into the corresponding Python
      # exception, appending the op name (when given) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6136 
6137 
def quantized_batch_norm_with_global_normalization_eager_fallback(t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, out_type, variance_epsilon, scale_after_normalization, name=None, ctx=None):
  r"""Eager-mode slowpath for quantized_batch_norm_with_global_normalization.

  Normalizes the attributes, converts the inputs to tensors, executes the
  QuantizedBatchNormWithGlobalNormalization op, records the gradient, and
  returns the structured (result, result_min, result_max) result.
  """
  eager_ctx = ctx or _context.context()
  out_type = _execute.make_type(out_type, "out_type")
  variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
  scale_after_normalization = _execute.make_bool(
      scale_after_normalization, "scale_after_normalization")
  # t, m, v, beta and gamma are promoted to a single matching dtype (Tinput).
  _attr_Tinput, matched = _execute.args_to_matching_eager(
      [t, m, v, beta, gamma], eager_ctx)
  (t, m, v, beta, gamma) = matched
  # The min/max range inputs are all converted to float32 tensors.
  (t_min, t_max, m_min, m_max, v_min, v_max, beta_min, beta_max,
   gamma_min, gamma_max) = [
       _ops.convert_to_tensor(r, _dtypes.float32)
       for r in (t_min, t_max, m_min, m_max, v_min, v_max,
                 beta_min, beta_max, gamma_min, gamma_max)]
  flat_inputs = [t, t_min, t_max, m, m_min, m_max, v, v_min, v_max,
                 beta, beta_min, beta_max, gamma, gamma_min, gamma_max]
  op_attrs = ("Tinput", _attr_Tinput, "out_type", out_type,
              "variance_epsilon", variance_epsilon,
              "scale_after_normalization", scale_after_normalization)
  outputs = _execute.execute(b"QuantizedBatchNormWithGlobalNormalization", 3,
                             inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "QuantizedBatchNormWithGlobalNormalization", flat_inputs, op_attrs,
      outputs, name)
  return _QuantizedBatchNormWithGlobalNormalizationOutput._make(outputs)
6168 
6169 
# Field names for the QuantizedBiasAdd op's structured result.
_quantized_bias_add_outputs = ["output", "min_out", "max_out"]
# Named tuple returned by quantized_bias_add: (output, min_out, max_out).
_QuantizedBiasAddOutput = _collections.namedtuple(
    "QuantizedBiasAdd", _quantized_bias_add_outputs)
6173 
6174 
def quantized_bias_add(input, bias, min_input, max_input, min_bias, max_bias, out_type, name=None):
  r"""Adds Tensor 'bias' to Tensor 'input' for Quantized types.

  Broadcasts the values of bias on dimensions 0..N-2 of 'input'.

  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    bias: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      A 1D bias Tensor with size matching the last dimension of 'input'.
    min_input: A `Tensor` of type `float32`.
      The float value that the lowest quantized input value represents.
    max_input: A `Tensor` of type `float32`.
      The float value that the highest quantized input value represents.
    min_bias: A `Tensor` of type `float32`.
      The float value that the lowest quantized bias value represents.
    max_bias: A `Tensor` of type `float32`.
      The float value that the highest quantized bias value represents.
    out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, min_out, max_out).

    output: A `Tensor` of type `out_type`.
    min_out: A `Tensor` of type `float32`.
    max_out: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): normalize the attribute and build
  # a symbolic QuantizedBiasAdd op through the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    out_type = _execute.make_type(out_type, "out_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedBiasAdd", input=input, bias=bias, min_input=min_input,
        max_input=max_input, min_bias=min_bias, max_bias=max_bias,
        out_type=out_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Read the attrs back from the created op so the recorded values match
    # exactly what the op was built with.
    _attrs = ("T1", _op.get_attr("T1"), "T2", _op.get_attr("T2"), "out_type",
              _op.get_attr("out_type"))
    _execute.record_gradient(
      "QuantizedBiasAdd", _inputs_flat, _attrs, _result, name)
    _result = _QuantizedBiasAddOutput._make(_result)
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ execution routine.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizedBiasAdd", name, _ctx._post_execution_callbacks, input, bias,
        min_input, max_input, min_bias, max_bias, "out_type", out_type)
      _result = _QuantizedBiasAddOutput._make(_result)
      return _result
    except _core._FallbackException:
      # The fast path rejected these arguments; retry through the Python
      # slowpath implementation.
      return quantized_bias_add_eager_fallback(
          input, bias, min_input, max_input, min_bias, max_bias,
          out_type=out_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ error status into the corresponding Python
      # exception, appending the op name (when given) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6236 
6237 
def quantized_bias_add_eager_fallback(input, bias, min_input, max_input, min_bias, max_bias, out_type, name=None, ctx=None):
  r"""Eager-mode slowpath for quantized_bias_add.

  Normalizes the attribute, converts the inputs to tensors, executes the
  QuantizedBiasAdd op, records the gradient, and returns the structured
  (output, min_out, max_out) result.
  """
  eager_ctx = ctx or _context.context()
  out_type = _execute.make_type(out_type, "out_type")
  _attr_T1, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  _attr_T2, (bias,) = _execute.args_to_matching_eager([bias], eager_ctx)
  # The min/max range inputs are all converted to float32 tensors.
  min_input, max_input, min_bias, max_bias = [
      _ops.convert_to_tensor(r, _dtypes.float32)
      for r in (min_input, max_input, min_bias, max_bias)]
  flat_inputs = [input, bias, min_input, max_input, min_bias, max_bias]
  op_attrs = ("T1", _attr_T1, "T2", _attr_T2, "out_type", out_type)
  outputs = _execute.execute(b"QuantizedBiasAdd", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("QuantizedBiasAdd", flat_inputs, op_attrs,
                           outputs, name)
  return _QuantizedBiasAddOutput._make(outputs)
6258 
6259 
# Field names for the QuantizedConv2D op's structured result.
_quantized_conv2d_outputs = ["output", "min_output", "max_output"]
# Named tuple returned by quantized_conv2d:
# (output, min_output, max_output).
_QuantizedConv2DOutput = _collections.namedtuple(
    "QuantizedConv2D", _quantized_conv2d_outputs)
6263 
6264 
@tf_export('nn.quantized_conv2d')
def quantized_conv2d(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
  r"""Computes a 2D convolution given quantized 4D input and filter tensors.

  The inputs are quantized tensors where the lowest value represents the real
  number of the associated minimum, and the highest represents the maximum.
  This means that you can only interpret the quantized output in the same way, by
  taking the returned minimum and maximum values into account.

  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      filter's input_depth dimension must match input's depth dimensions.
    min_input: A `Tensor` of type `float32`.
      The float value that the lowest quantized input value represents.
    max_input: A `Tensor` of type `float32`.
      The float value that the highest quantized input value represents.
    min_filter: A `Tensor` of type `float32`.
      The float value that the lowest quantized filter value represents.
    max_filter: A `Tensor` of type `float32`.
      The float value that the highest quantized filter value represents.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4.  The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by the
      value of `data_format`, see above for details. Dilations in the batch and
      depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).

    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no active eager context): validate attributes in Python and
  # build a symbolic QuantizedConv2D op through the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'quantized_conv2d' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    if out_type is None:
      out_type = _dtypes.qint32
    out_type = _execute.make_type(out_type, "out_type")
    if dilations is None:
      dilations = [1, 1, 1, 1]
    if not isinstance(dilations, (list, tuple)):
      raise TypeError(
          "Expected list for 'dilations' argument to "
          "'quantized_conv2d' Op, not %r." % dilations)
    dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedConv2D", input=input, filter=filter, min_input=min_input,
        max_input=max_input, min_filter=min_filter, max_filter=max_filter,
        strides=strides, padding=padding, out_type=out_type,
        dilations=dilations, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Read the attrs back from the created op so the recorded values match
    # exactly what the op was built with.
    _attrs = ("Tinput", _op.get_attr("Tinput"), "Tfilter",
              _op.get_attr("Tfilter"), "out_type", _op.get_attr("out_type"),
              "strides", _op.get_attr("strides"), "padding",
              _op.get_attr("padding"), "dilations", _op.get_attr("dilations"))
    _execute.record_gradient(
      "QuantizedConv2D", _inputs_flat, _attrs, _result, name)
    _result = _QuantizedConv2DOutput._make(_result)
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ execution routine.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizedConv2D", name, _ctx._post_execution_callbacks, input,
        filter, min_input, max_input, min_filter, max_filter, "out_type",
        out_type, "strides", strides, "padding", padding, "dilations",
        dilations)
      _result = _QuantizedConv2DOutput._make(_result)
      return _result
    except _core._FallbackException:
      # The fast path rejected these arguments; retry through the Python
      # slowpath implementation.
      return quantized_conv2d_eager_fallback(
          input, filter, min_input, max_input, min_filter, max_filter,
          out_type=out_type, strides=strides, padding=padding,
          dilations=dilations, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the C++ error status into the corresponding Python
      # exception, appending the op name (when given) for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6362 
6363 
def quantized_conv2d_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_conv2d

  Validates and canonicalizes the op attributes, converts the quantization
  range arguments to float32 tensors, and runs QuantizedConv2D through the
  generic eager execution path.

  Note: `dilations` defaults to None rather than a shared mutable list
  (the classic mutable-default pitfall); None is normalized to
  [1, 1, 1, 1] below, so observable behavior is unchanged.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # Infer the quantized input/filter dtypes from the tensors themselves.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  # The quantization range endpoints are always float32 tensors.
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  _inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
  out_type, "strides", strides, "padding", padding, "dilations", dilations)
  _result = _execute.execute(b"QuantizedConv2D", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "QuantizedConv2D", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DOutput._make(_result)
  return _result
6400 
6401 
# QuantizedMaxPool emits three tensors: the pooled output plus the float
# range endpoints that its quantized values represent.
_quantized_max_pool_outputs = ["output", "min_output", "max_output"]
_QuantizedMaxPoolOutput = _collections.namedtuple(
    "QuantizedMaxPool", _quantized_max_pool_outputs)
6405 
6406 
@tf_export('nn.quantized_max_pool')
def quantized_max_pool(input, min_input, max_input, ksize, strides, padding, name=None):
  r"""Produces the max pool of the input tensor for quantized types.

  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
    min_input: A `Tensor` of type `float32`.
      The float value that the lowest quantized input value represents.
    max_input: A `Tensor` of type `float32`.
      The float value that the highest quantized input value represents.
    ksize: A list of `ints`.
      The size of the window for each dimension of the input tensor.
      The length must be 4 to match the number of dimensions of the input.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      tensor. The length must be 4 to match the number of dimensions of the input.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).

    output: A `Tensor`. Has the same type as `input`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager not enabled): validate the
  # attributes in Python, then build a graph op via the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(ksize, (list, tuple)):
      raise TypeError(
          "Expected list for 'ksize' argument to "
          "'quantized_max_pool' Op, not %r." % ksize)
    ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
    if not isinstance(strides, (list, tuple)):
      raise TypeError(
          "Expected list for 'strides' argument to "
          "'quantized_max_pool' Op, not %r." % strides)
    strides = [_execute.make_int(_i, "strides") for _i in strides]
    padding = _execute.make_str(padding, "padding")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedMaxPool", input=input, min_input=min_input,
        max_input=max_input, ksize=ksize, strides=strides, padding=padding,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "ksize", _op.get_attr("ksize"),
              "strides", _op.get_attr("strides"), "padding",
              _op.get_attr("padding"))
    _execute.record_gradient(
      "QuantizedMaxPool", _inputs_flat, _attrs, _result, name)
    _result = _QuantizedMaxPoolOutput._make(_result)
    return _result

  else:
    try:
      # Eager fast path: dispatch straight into the C extension.  The
      # argument layout (inputs first, then alternating attr name/value)
      # is a fixed positional protocol of TFE_Py_FastPathExecute.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizedMaxPool", name, _ctx._post_execution_callbacks, input,
        min_input, max_input, "ksize", ksize, "strides", strides, "padding",
        padding)
      _result = _QuantizedMaxPoolOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle: use the generic slow path.
      return quantized_max_pool_eager_fallback(
          input, min_input, max_input, ksize=ksize, strides=strides,
          padding=padding, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6481 
6482 
def quantized_max_pool_eager_fallback(input, min_input, max_input, ksize, strides, padding, name=None, ctx=None):
  r"""Eager-mode slow path for quantized_max_pool.

  Validates the window/stride attributes, converts the quantization range
  arguments to float32 tensors, and executes QuantizedMaxPool via the
  generic eager path.
  """
  _ctx = ctx or _context.context()
  if isinstance(ksize, (list, tuple)):
    ksize = [_execute.make_int(_k, "ksize") for _k in ksize]
  else:
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'quantized_max_pool' Op, not %r." % ksize)
  if isinstance(strides, (list, tuple)):
    strides = [_execute.make_int(_s, "strides") for _s in strides]
  else:
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_max_pool' Op, not %r." % strides)
  padding = _execute.make_str(padding, "padding")
  # The input dtype is inferred from the tensor; range endpoints are float32.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  _inputs_flat = [input, min_input, max_input]
  _attrs = ("T", _attr_T,
            "ksize", ksize,
            "strides", strides,
            "padding", padding)
  _result = _execute.execute(b"QuantizedMaxPool", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "QuantizedMaxPool", _inputs_flat, _attrs, _result, name)
  return _QuantizedMaxPoolOutput._make(_result)
6511 
6512 
# QuantizedRelu emits the activations plus the float range endpoints that
# the quantized activation values represent.
_quantized_relu_outputs = ["activations", "min_activations",
                          "max_activations"]
_QuantizedReluOutput = _collections.namedtuple(
    "QuantizedRelu", _quantized_relu_outputs)
6517 
6518 
def quantized_relu(features, min_features, max_features, out_type=_dtypes.quint8, name=None):
  r"""Computes Quantized Rectified Linear: `max(features, 0)`

  Args:
    features: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_features: A `Tensor` of type `float32`.
      The float value that the lowest quantized value represents.
    max_features: A `Tensor` of type `float32`.
      The float value that the highest quantized value represents.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (activations, min_activations, max_activations).

    activations: A `Tensor` of type `out_type`.
    min_activations: A `Tensor` of type `float32`.
    max_activations: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager not enabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if out_type is None:
      out_type = _dtypes.quint8
    out_type = _execute.make_type(out_type, "out_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedRelu", features=features, min_features=min_features,
        max_features=max_features, out_type=out_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tinput", _op.get_attr("Tinput"), "out_type",
              _op.get_attr("out_type"))
    _execute.record_gradient(
      "QuantizedRelu", _inputs_flat, _attrs, _result, name)
    _result = _QuantizedReluOutput._make(_result)
    return _result

  else:
    try:
      # Eager fast path: fixed positional protocol of TFE_Py_FastPathExecute
      # (inputs first, then alternating attr name/value pairs).
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizedRelu", name, _ctx._post_execution_callbacks, features,
        min_features, max_features, "out_type", out_type)
      _result = _QuantizedReluOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle: use the generic slow path.
      return quantized_relu_eager_fallback(
          features, min_features, max_features, out_type=out_type, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6573 
6574 
def quantized_relu_eager_fallback(features, min_features, max_features, out_type=_dtypes.quint8, name=None, ctx=None):
  r"""Eager-mode slow path for quantized_relu."""
  _ctx = ctx or _context.context()
  # Normalize the output dtype attribute (None means the default quint8).
  out_type = _execute.make_type(
      _dtypes.quint8 if out_type is None else out_type, "out_type")
  _attr_Tinput, (features,) = _execute.args_to_matching_eager([features], _ctx)
  min_features = _ops.convert_to_tensor(min_features, _dtypes.float32)
  max_features = _ops.convert_to_tensor(max_features, _dtypes.float32)
  _inputs_flat = [features, min_features, max_features]
  _attrs = ("Tinput", _attr_Tinput, "out_type", out_type)
  _result = _execute.execute(b"QuantizedRelu", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "QuantizedRelu", _inputs_flat, _attrs, _result, name)
  return _QuantizedReluOutput._make(_result)
6594 
6595 
# QuantizedRelu6 emits the activations plus the float range endpoints that
# the quantized activation values represent.
_quantized_relu6_outputs = ["activations", "min_activations",
                           "max_activations"]
_QuantizedRelu6Output = _collections.namedtuple(
    "QuantizedRelu6", _quantized_relu6_outputs)
6600 
6601 
def quantized_relu6(features, min_features, max_features, out_type=_dtypes.quint8, name=None):
  r"""Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`

  Args:
    features: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_features: A `Tensor` of type `float32`.
      The float value that the lowest quantized value represents.
    max_features: A `Tensor` of type `float32`.
      The float value that the highest quantized value represents.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (activations, min_activations, max_activations).

    activations: A `Tensor` of type `out_type`.
    min_activations: A `Tensor` of type `float32`.
    max_activations: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager not enabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if out_type is None:
      out_type = _dtypes.quint8
    out_type = _execute.make_type(out_type, "out_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedRelu6", features=features, min_features=min_features,
        max_features=max_features, out_type=out_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tinput", _op.get_attr("Tinput"), "out_type",
              _op.get_attr("out_type"))
    _execute.record_gradient(
      "QuantizedRelu6", _inputs_flat, _attrs, _result, name)
    _result = _QuantizedRelu6Output._make(_result)
    return _result

  else:
    try:
      # Eager fast path: fixed positional protocol of TFE_Py_FastPathExecute.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizedRelu6", name, _ctx._post_execution_callbacks, features,
        min_features, max_features, "out_type", out_type)
      _result = _QuantizedRelu6Output._make(_result)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle: use the generic slow path.
      return quantized_relu6_eager_fallback(
          features, min_features, max_features, out_type=out_type, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6656 
6657 
def quantized_relu6_eager_fallback(features, min_features, max_features, out_type=_dtypes.quint8, name=None, ctx=None):
  r"""Eager-mode slow path for quantized_relu6."""
  _ctx = ctx or _context.context()
  # Normalize the output dtype attribute (None means the default quint8).
  out_type = _execute.make_type(
      _dtypes.quint8 if out_type is None else out_type, "out_type")
  _attr_Tinput, (features,) = _execute.args_to_matching_eager([features], _ctx)
  min_features = _ops.convert_to_tensor(min_features, _dtypes.float32)
  max_features = _ops.convert_to_tensor(max_features, _dtypes.float32)
  _inputs_flat = [features, min_features, max_features]
  _attrs = ("Tinput", _attr_Tinput, "out_type", out_type)
  _result = _execute.execute(b"QuantizedRelu6", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "QuantizedRelu6", _inputs_flat, _attrs, _result, name)
  return _QuantizedRelu6Output._make(_result)
6677 
6678 
# QuantizedReluX emits the activations plus the float range endpoints that
# the quantized activation values represent.
_quantized_relu_x_outputs = ["activations", "min_activations",
                            "max_activations"]
_QuantizedReluXOutput = _collections.namedtuple(
    "QuantizedReluX", _quantized_relu_x_outputs)
6683 
6684 
@tf_export('nn.quantized_relu_x')
def quantized_relu_x(features, max_value, min_features, max_features, out_type=_dtypes.quint8, name=None):
  r"""Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`

  Args:
    features: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    max_value: A `Tensor` of type `float32`.
    min_features: A `Tensor` of type `float32`.
      The float value that the lowest quantized value represents.
    max_features: A `Tensor` of type `float32`.
      The float value that the highest quantized value represents.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (activations, min_activations, max_activations).

    activations: A `Tensor` of type `out_type`.
    min_activations: A `Tensor` of type `float32`.
    max_activations: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager not enabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if out_type is None:
      out_type = _dtypes.quint8
    out_type = _execute.make_type(out_type, "out_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedReluX", features=features, max_value=max_value,
        min_features=min_features, max_features=max_features,
        out_type=out_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tinput", _op.get_attr("Tinput"), "out_type",
              _op.get_attr("out_type"))
    _execute.record_gradient(
      "QuantizedReluX", _inputs_flat, _attrs, _result, name)
    _result = _QuantizedReluXOutput._make(_result)
    return _result

  else:
    try:
      # Eager fast path: fixed positional protocol of TFE_Py_FastPathExecute.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizedReluX", name, _ctx._post_execution_callbacks, features,
        max_value, min_features, max_features, "out_type", out_type)
      _result = _QuantizedReluXOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle: use the generic slow path.
      return quantized_relu_x_eager_fallback(
          features, max_value, min_features, max_features, out_type=out_type,
          name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6742 
6743 
def quantized_relu_x_eager_fallback(features, max_value, min_features, max_features, out_type=_dtypes.quint8, name=None, ctx=None):
  r"""Eager-mode slow path for quantized_relu_x."""
  _ctx = ctx or _context.context()
  # Normalize the output dtype attribute (None means the default quint8).
  out_type = _execute.make_type(
      _dtypes.quint8 if out_type is None else out_type, "out_type")
  _attr_Tinput, (features,) = _execute.args_to_matching_eager([features], _ctx)
  max_value = _ops.convert_to_tensor(max_value, _dtypes.float32)
  min_features = _ops.convert_to_tensor(min_features, _dtypes.float32)
  max_features = _ops.convert_to_tensor(max_features, _dtypes.float32)
  _inputs_flat = [features, max_value, min_features, max_features]
  _attrs = ("Tinput", _attr_Tinput, "out_type", out_type)
  _result = _execute.execute(b"QuantizedReluX", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "QuantizedReluX", _inputs_flat, _attrs, _result, name)
  return _QuantizedReluXOutput._make(_result)
6764 
6765 
@tf_export('nn.relu')
def relu(features, name=None):
  r"""Computes rectified linear: `max(features, 0)`.

  Args:
    features: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager not enabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Relu", features=features, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Relu", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch straight into the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Relu", name,
        _ctx._post_execution_callbacks, features)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle: use the generic slow path.
      return relu_eager_fallback(
          features, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6804 
6805 
def relu_eager_fallback(features, name=None, ctx=None):
  r"""Eager-mode slow path for relu."""
  _ctx = ctx or _context.context()
  _attr_T, (features,) = _execute.args_to_matching_eager([features], _ctx)
  _inputs_flat = [features]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Relu", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Relu", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  (_result,) = _result
  return _result
6820 
6821 
def relu6(features, name=None):
  r"""Computes rectified linear 6: `min(max(features, 0), 6)`.

  Args:
    features: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager not enabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Relu6", features=features, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Relu6", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch straight into the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Relu6", name,
        _ctx._post_execution_callbacks, features)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle: use the generic slow path.
      return relu6_eager_fallback(
          features, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6859 
6860 
def relu6_eager_fallback(features, name=None, ctx=None):
  r"""Eager-mode slow path for relu6."""
  _ctx = ctx or _context.context()
  _attr_T, (features,) = _execute.args_to_matching_eager([features], _ctx)
  _inputs_flat = [features]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Relu6", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Relu6", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  (_result,) = _result
  return _result
6875 
6876 
def relu6_grad(gradients, features, name=None):
  r"""Computes rectified linear 6 gradients for a Relu6 operation.

  Args:
    gradients: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The backpropagated gradients to the corresponding Relu6 operation.
    features: A `Tensor`. Must have the same type as `gradients`.
      The features passed as input to the corresponding Relu6 operation, or
      its output; using either one produces the same result.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `gradients`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager not enabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Relu6Grad", gradients=gradients, features=features, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Relu6Grad", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch straight into the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Relu6Grad",
        name, _ctx._post_execution_callbacks, gradients, features)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle: use the generic slow path.
      return relu6_grad_eager_fallback(
          gradients, features, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6918 
6919 
def relu6_grad_eager_fallback(gradients, features, name=None, ctx=None):
  r"""Eager-mode slow path for relu6_grad."""
  _ctx = ctx or _context.context()
  # Both inputs must share one dtype; infer it from the pair together.
  _attr_T, (gradients, features) = _execute.args_to_matching_eager(
      [gradients, features], _ctx)
  _inputs_flat = [gradients, features]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Relu6Grad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Relu6Grad", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  (_result,) = _result
  return _result
6935 
6936 
def relu_grad(gradients, features, name=None):
  r"""Computes rectified linear gradients for a Relu operation.

  Args:
    gradients: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The backpropagated gradients to the corresponding Relu operation.
    features: A `Tensor`. Must have the same type as `gradients`.
      The features passed as input to the corresponding Relu operation, OR
      the outputs of that operation (both work equivalently).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `gradients`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager not enabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "ReluGrad", gradients=gradients, features=features, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "ReluGrad", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch straight into the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ReluGrad",
        name, _ctx._post_execution_callbacks, gradients, features)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle: use the generic slow path.
      return relu_grad_eager_fallback(
          gradients, features, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6978 
6979 
def relu_grad_eager_fallback(gradients, features, name=None, ctx=None):
  r"""Eager-mode slow path for relu_grad."""
  _ctx = ctx or _context.context()
  # Both inputs must share one dtype; infer it from the pair together.
  _attr_T, (gradients, features) = _execute.args_to_matching_eager(
      [gradients, features], _ctx)
  _inputs_flat = [gradients, features]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"ReluGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ReluGrad", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  (_result,) = _result
  return _result
6995 
6996 
@tf_export('nn.selu')
def selu(features, name=None):
  r"""Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`

  if < 0, `scale * features` otherwise.

  To be used together with
  `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`.
  For correct dropout, use `tf.contrib.nn.alpha_dropout`.

  See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)

  Args:
    features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context
  # Graph mode (no eager context, or eager not enabled): build a graph op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Selu", features=features, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Selu", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch straight into the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Selu", name,
        _ctx._post_execution_callbacks, features)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle: use the generic slow path.
      return selu_eager_fallback(
          features, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7059 
7060 
def selu_grad(gradients, outputs, name=None):
  r"""Computes gradients for the scaled exponential linear (Selu) operation.

  Args:
    gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      The backpropagated gradients to the corresponding Selu operation.
    outputs: A `Tensor`. Must have the same type as `gradients`.
      The outputs of the corresponding Selu operation.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `gradients`.
  """
  _ctx = _context._context
  # Graph mode: no eager context yet, or eager execution is disabled.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Build a "SeluGrad" node in the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SeluGrad", gradients=gradients, outputs=outputs, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    # Record the op for gradient computation before unpacking the single output.
    _execute.record_gradient(
      "SeluGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SeluGrad",
        name, _ctx._post_execution_callbacks, gradients, outputs)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return selu_grad_eager_fallback(
          gradients, outputs, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the op's error status into the matching Python exception,
      # appending the op name to the message when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7101 
7102 
def selu_grad_eager_fallback(gradients, outputs, name=None, ctx=None):
  r"""Slow-path eager execution of the SeluGrad op.

  Invoked by selu_grad when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager(
      [gradients, outputs], eager_ctx)
  gradients, outputs = matched
  flat_inputs = [gradients, outputs]
  op_attrs = ("T", attr_t)
  results = _execute.execute(b"SeluGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("SeluGrad", flat_inputs, op_attrs, results, name)
  out, = results
  return out
7118 
7119 
def softmax(logits, name=None):
  r"""Computes softmax activations.

  For each batch `i` and class `j` we have

      $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$

  Args:
    logits: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      2-D with shape `[batch_size, num_classes]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`.
  """
  _ctx = _context._context
  # Graph mode: no eager context yet, or eager execution is disabled.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Build a "Softmax" node in the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Softmax", logits=logits, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    # Record the op for gradient computation before unpacking the single output.
    _execute.record_gradient(
      "Softmax", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Softmax",
        name, _ctx._post_execution_callbacks, logits)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return softmax_eager_fallback(
          logits, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the op's error status into the matching Python exception,
      # appending the op name to the message when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7162 
7163 
def softmax_eager_fallback(logits, name=None, ctx=None):
  r"""Slow-path eager execution of the Softmax op.

  Invoked by softmax when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (logits,) = _execute.args_to_matching_eager([logits], eager_ctx)
  flat_inputs = [logits]
  op_attrs = ("T", attr_t)
  results = _execute.execute(b"Softmax", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Softmax", flat_inputs, op_attrs, results, name)
  out, = results
  return out
7178 
7179 
# Structured (loss, backprop) return type for the SoftmaxCrossEntropyWithLogits op.
_softmax_cross_entropy_with_logits_outputs = ["loss", "backprop"]
_SoftmaxCrossEntropyWithLogitsOutput = _collections.namedtuple(
    "SoftmaxCrossEntropyWithLogits", _softmax_cross_entropy_with_logits_outputs)
7184 
7185 
def softmax_cross_entropy_with_logits(features, labels, name=None):
  r"""Computes softmax cross entropy cost and gradients to backpropagate.

  Inputs are the logits, not probabilities.

  Args:
    features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      batch_size x num_classes matrix
    labels: A `Tensor`. Must have the same type as `features`.
      batch_size x num_classes matrix
      The caller must ensure that each batch of labels represents a valid
      probability distribution.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (loss, backprop).

    loss: A `Tensor`. Has the same type as `features`.
    backprop: A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context
  # Graph mode: no eager context yet, or eager execution is disabled.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Build the op in the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SoftmaxCrossEntropyWithLogits", features=features, labels=labels,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    # Record the op for gradient computation, then wrap the two outputs
    # in the (loss, backprop) namedtuple.
    _execute.record_gradient(
      "SoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result, name)
    _result = _SoftmaxCrossEntropyWithLogitsOutput._make(_result)
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SoftmaxCrossEntropyWithLogits", name, _ctx._post_execution_callbacks,
        features, labels)
      _result = _SoftmaxCrossEntropyWithLogitsOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return softmax_cross_entropy_with_logits_eager_fallback(
          features, labels, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the op's error status into the matching Python exception,
      # appending the op name to the message when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7236 
7237 
def softmax_cross_entropy_with_logits_eager_fallback(features, labels, name=None, ctx=None):
  r"""Slow-path eager execution of the SoftmaxCrossEntropyWithLogits op.

  Invoked by softmax_cross_entropy_with_logits when the C fast path raises
  _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager(
      [features, labels], eager_ctx)
  features, labels = matched
  flat_inputs = [features, labels]
  op_attrs = ("T", attr_t)
  results = _execute.execute(b"SoftmaxCrossEntropyWithLogits", 2,
                             inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("SoftmaxCrossEntropyWithLogits", flat_inputs,
                           op_attrs, results, name)
  return _SoftmaxCrossEntropyWithLogitsOutput._make(results)
7254 
7255 
@tf_export('math.softplus', 'nn.softplus')
def softplus(features, name=None):
  r"""Computes softplus: `log(exp(features) + 1)`.

  Args:
    features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context
  # Graph mode: no eager context yet, or eager execution is disabled.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Build a "Softplus" node in the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Softplus", features=features, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    # Record the op for gradient computation before unpacking the single output.
    _execute.record_gradient(
      "Softplus", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Softplus",
        name, _ctx._post_execution_callbacks, features)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return softplus_eager_fallback(
          features, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the op's error status into the matching Python exception,
      # appending the op name to the message when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7294 
7295 
def softplus_eager_fallback(features, name=None, ctx=None):
  r"""Slow-path eager execution of the Softplus op.

  Invoked by softplus when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (features,) = _execute.args_to_matching_eager([features], eager_ctx)
  flat_inputs = [features]
  op_attrs = ("T", attr_t)
  results = _execute.execute(b"Softplus", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Softplus", flat_inputs, op_attrs, results, name)
  out, = results
  return out
7310 
7311 
def softplus_grad(gradients, features, name=None):
  r"""Computes softplus gradients for a softplus operation.

  Args:
    gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      The backpropagated gradients to the corresponding softplus operation.
    features: A `Tensor`. Must have the same type as `gradients`.
      The features passed as input to the corresponding softplus operation.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `gradients`.
  """
  _ctx = _context._context
  # Graph mode: no eager context yet, or eager execution is disabled.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Build a "SoftplusGrad" node in the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SoftplusGrad", gradients=gradients, features=features, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    # Record the op for gradient computation before unpacking the single output.
    _execute.record_gradient(
      "SoftplusGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SoftplusGrad",
        name, _ctx._post_execution_callbacks, gradients, features)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return softplus_grad_eager_fallback(
          gradients, features, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the op's error status into the matching Python exception,
      # appending the op name to the message when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7352 
7353 
def softplus_grad_eager_fallback(gradients, features, name=None, ctx=None):
  r"""Slow-path eager execution of the SoftplusGrad op.

  Invoked by softplus_grad when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager(
      [gradients, features], eager_ctx)
  gradients, features = matched
  flat_inputs = [gradients, features]
  op_attrs = ("T", attr_t)
  results = _execute.execute(b"SoftplusGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("SoftplusGrad", flat_inputs, op_attrs, results,
                           name)
  out, = results
  return out
7369 
7370 
@tf_export('nn.softsign', 'math.softsign')
def softsign(features, name=None):
  r"""Computes softsign: `features / (abs(features) + 1)`.

  Args:
    features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context
  # Graph mode: no eager context yet, or eager execution is disabled.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Build a "Softsign" node in the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Softsign", features=features, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    # Record the op for gradient computation before unpacking the single output.
    _execute.record_gradient(
      "Softsign", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Softsign",
        name, _ctx._post_execution_callbacks, features)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return softsign_eager_fallback(
          features, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the op's error status into the matching Python exception,
      # appending the op name to the message when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7409 
7410 
def softsign_eager_fallback(features, name=None, ctx=None):
  r"""Slow-path eager execution of the Softsign op.

  Invoked by softsign when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (features,) = _execute.args_to_matching_eager([features], eager_ctx)
  flat_inputs = [features]
  op_attrs = ("T", attr_t)
  results = _execute.execute(b"Softsign", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Softsign", flat_inputs, op_attrs, results, name)
  out, = results
  return out
7425 
7426 
def softsign_grad(gradients, features, name=None):
  r"""Computes softsign gradients for a softsign operation.

  Args:
    gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      The backpropagated gradients to the corresponding softsign operation.
    features: A `Tensor`. Must have the same type as `gradients`.
      The features passed as input to the corresponding softsign operation.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `gradients`.
  """
  _ctx = _context._context
  # Graph mode: no eager context yet, or eager execution is disabled.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Build a "SoftsignGrad" node in the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SoftsignGrad", gradients=gradients, features=features, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    # Record the op for gradient computation before unpacking the single output.
    _execute.record_gradient(
      "SoftsignGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SoftsignGrad",
        name, _ctx._post_execution_callbacks, gradients, features)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return softsign_grad_eager_fallback(
          gradients, features, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the op's error status into the matching Python exception,
      # appending the op name to the message when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7467 
7468 
def softsign_grad_eager_fallback(gradients, features, name=None, ctx=None):
  r"""Slow-path eager execution of the SoftsignGrad op.

  Invoked by softsign_grad when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  attr_t, matched = _execute.args_to_matching_eager(
      [gradients, features], eager_ctx)
  gradients, features = matched
  flat_inputs = [gradients, features]
  op_attrs = ("T", attr_t)
  results = _execute.execute(b"SoftsignGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("SoftsignGrad", flat_inputs, op_attrs, results,
                           name)
  out, = results
  return out
7484 
7485 
# Structured (loss, backprop) return type for the
# SparseSoftmaxCrossEntropyWithLogits op.
_sparse_softmax_cross_entropy_with_logits_outputs = ["loss", "backprop"]
_SparseSoftmaxCrossEntropyWithLogitsOutput = _collections.namedtuple(
    "SparseSoftmaxCrossEntropyWithLogits",
    _sparse_softmax_cross_entropy_with_logits_outputs)
7490 
7491 
def sparse_softmax_cross_entropy_with_logits(features, labels, name=None):
  r"""Computes softmax cross entropy cost and gradients to backpropagate.

  Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
  a matrix of label probabilities, but rather a single label per row
  of features.  This label is considered to have probability 1.0 for the
  given row.

  Inputs are the logits, not probabilities.

  Args:
    features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      batch_size x num_classes matrix
    labels: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      batch_size vector with values in [0, num_classes).
      This is the label for the given minibatch entry.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (loss, backprop).

    loss: A `Tensor`. Has the same type as `features`.
    backprop: A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context
  # Graph mode: no eager context yet, or eager execution is disabled.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Build the op in the current graph via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSoftmaxCrossEntropyWithLogits", features=features,
        labels=labels, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tlabels", _op.get_attr("Tlabels"))
    # Record the op for gradient computation, then wrap the two outputs
    # in the (loss, backprop) namedtuple.
    _execute.record_gradient(
      "SparseSoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result, name)
    _result = _SparseSoftmaxCrossEntropyWithLogitsOutput._make(_result)
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSoftmaxCrossEntropyWithLogits", name,
        _ctx._post_execution_callbacks, features, labels)
      _result = _SparseSoftmaxCrossEntropyWithLogitsOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return sparse_softmax_cross_entropy_with_logits_eager_fallback(
          features, labels, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the op's error status into the matching Python exception,
      # appending the op name to the message when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7546 
7547 
def sparse_softmax_cross_entropy_with_logits_eager_fallback(features, labels, name=None, ctx=None):
  r"""Slow-path eager execution of the SparseSoftmaxCrossEntropyWithLogits op.

  Invoked by sparse_softmax_cross_entropy_with_logits when the C fast path
  raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (features,) = _execute.args_to_matching_eager([features], eager_ctx)
  # Labels default to int64 when their dtype cannot be inferred.
  attr_tlabels, (labels,) = _execute.args_to_matching_eager(
      [labels], eager_ctx, _dtypes.int64)
  flat_inputs = [features, labels]
  op_attrs = ("T", attr_t, "Tlabels", attr_tlabels)
  results = _execute.execute(b"SparseSoftmaxCrossEntropyWithLogits", 2,
                             inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("SparseSoftmaxCrossEntropyWithLogits", flat_inputs,
                           op_attrs, results, name)
  return _SparseSoftmaxCrossEntropyWithLogitsOutput._make(results)
7564 
7565 
# Structured (values, indices) return type for the TopK op.
_top_k_outputs = ["values", "indices"]
_TopKOutput = _collections.namedtuple("TopK", _top_k_outputs)
7569 
7570 
def top_k(input, k, sorted=True, name=None):
  r"""Finds values and indices of the `k` largest elements for the last dimension.

  If the input is a vector (rank-1), finds the `k` largest entries in the vector
  and outputs their values and indices as vectors.  Thus `values[j]` is the
  `j`-th largest entry in `input`, and its index is `indices[j]`.

  For matrices (resp. higher rank input), computes the top `k` entries in each
  row (resp. vector along the last dimension).  Thus,

      values.shape = indices.shape = input.shape[:-1] + [k]

  If two elements are equal, the lower-index element appears first.

  If `k` varies dynamically, use `TopKV2` below.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      1-D or higher with last dimension at least `k`.
    k: An `int` that is `>= 0`.
      Number of top elements to look for along the last dimension (along each
      row for matrices).
    sorted: An optional `bool`. Defaults to `True`.
      If true the resulting `k` elements will be sorted by the values in
      descending order.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (values, indices).

    values: A `Tensor`. Has the same type as `input`.
    indices: A `Tensor` of type `int32`.
  """
  _ctx = _context._context
  # Graph mode: no eager context yet, or eager execution is disabled.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Normalize attrs: `k` must be a plain int and `sorted` a bool
    # (None means "use the default", i.e. True).
    k = _execute.make_int(k, "k")
    if sorted is None:
      sorted = True
    sorted = _execute.make_bool(sorted, "sorted")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TopK", input=input, k=k, sorted=sorted, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("k", _op.get_attr("k"), "sorted", _op.get_attr("sorted"), "T",
              _op.get_attr("T"))
    # Record the op for gradient computation, then wrap the two outputs
    # in the (values, indices) namedtuple.
    _execute.record_gradient(
      "TopK", _inputs_flat, _attrs, _result, name)
    _result = _TopKOutput._make(_result)
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "TopK", name,
        _ctx._post_execution_callbacks, input, "k", k, "sorted", sorted)
      _result = _TopKOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return top_k_eager_fallback(
          input, k=k, sorted=sorted, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the op's error status into the matching Python exception,
      # appending the op name to the message when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7637 
7638 
def top_k_eager_fallback(input, k, sorted=True, name=None, ctx=None):
  r"""Slow-path eager execution of the TopK op.

  Invoked by top_k when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  # Normalize attrs: `k` must be a plain int and `sorted` a bool
  # (None means "use the default", i.e. True).
  k = _execute.make_int(k, "k")
  sorted = _execute.make_bool(True if sorted is None else sorted, "sorted")
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  flat_inputs = [input]
  op_attrs = ("k", k, "sorted", sorted, "T", attr_t)
  results = _execute.execute(b"TopK", 2, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("TopK", flat_inputs, op_attrs, results, name)
  return _TopKOutput._make(results)
7657 
7658 
# Structured (values, indices) return type for the TopKV2 op.
_top_kv2_outputs = ["values", "indices"]
_TopKV2Output = _collections.namedtuple("TopKV2", _top_kv2_outputs)
7662 
7663 
def top_kv2(input, k, sorted=True, name=None):
  r"""Finds values and indices of the `k` largest elements for the last dimension.

  If the input is a vector (rank-1), finds the `k` largest entries in the vector
  and outputs their values and indices as vectors.  Thus `values[j]` is the
  `j`-th largest entry in `input`, and its index is `indices[j]`.

  For matrices (resp. higher rank input), computes the top `k` entries in each
  row (resp. vector along the last dimension).  Thus,

      values.shape = indices.shape = input.shape[:-1] + [k]

  If two elements are equal, the lower-index element appears first.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      1-D or higher with last dimension at least `k`.
    k: A `Tensor` of type `int32`.
      0-D.  Number of top elements to look for along the last dimension (along each
      row for matrices).
    sorted: An optional `bool`. Defaults to `True`.
      If true the resulting `k` elements will be sorted by the values in
      descending order.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (values, indices).

    values: A `Tensor`. Has the same type as `input`.
    indices: A `Tensor` of type `int32`.
  """
  _ctx = _context._context
  # Graph mode: no eager context yet, or eager execution is disabled.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Normalize the `sorted` attr to a bool (None means default True).
    # Unlike TopK, `k` is a tensor input here, not an int attr.
    if sorted is None:
      sorted = True
    sorted = _execute.make_bool(sorted, "sorted")
    _, _, _op = _op_def_lib._apply_op_helper(
        "TopKV2", input=input, k=k, sorted=sorted, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("sorted", _op.get_attr("sorted"), "T", _op.get_attr("T"))
    # Record the op for gradient computation, then wrap the two outputs
    # in the (values, indices) namedtuple.
    _execute.record_gradient(
      "TopKV2", _inputs_flat, _attrs, _result, name)
    _result = _TopKV2Output._make(_result)
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "TopKV2", name,
        _ctx._post_execution_callbacks, input, k, "sorted", sorted)
      _result = _TopKV2Output._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python slow path.
      return top_kv2_eager_fallback(
          input, k, sorted=sorted, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert the op's error status into the matching Python exception,
      # appending the op name to the message when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7726 
7727 
def top_kv2_eager_fallback(input, k, sorted=True, name=None, ctx=None):
  r"""Slow-path eager execution of the TopKV2 op.

  Invoked by top_kv2 when the C fast path raises _FallbackException.
  """
  eager_ctx = ctx or _context.context()
  # Normalize the `sorted` attr to a bool (None means default True).
  sorted = _execute.make_bool(True if sorted is None else sorted, "sorted")
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  # `k` is a tensor input for TopKV2 and must be int32.
  k = _ops.convert_to_tensor(k, _dtypes.int32)
  flat_inputs = [input, k]
  op_attrs = ("sorted", sorted, "T", attr_t)
  results = _execute.execute(b"TopKV2", 2, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("TopKV2", flat_inputs, op_attrs, results, name)
  return _TopKV2Output._make(results)
7746 
def _InitOpDefLibrary(op_list_proto_bytes):
  """Parses serialized OpList bytes, registers the ops, and returns an
  OpDefLibrary populated with them."""
  parsed_ops = _op_def_pb2.OpList()
  parsed_ops.ParseFromString(op_list_proto_bytes)
  _op_def_registry.register_op_list(parsed_ops)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_ops)
  return library
7754 # op {
7755 #   name: "AvgPool"
7756 #   input_arg {
7757 #     name: "value"
7758 #     type_attr: "T"
7759 #   }
7760 #   output_arg {
7761 #     name: "output"
7762 #     type_attr: "T"
7763 #   }
7764 #   attr {
7765 #     name: "ksize"
7766 #     type: "list(int)"
7767 #     has_minimum: true
7768 #     minimum: 4
7769 #   }
7770 #   attr {
7771 #     name: "strides"
7772 #     type: "list(int)"
7773 #     has_minimum: true
7774 #     minimum: 4
7775 #   }
7776 #   attr {
7777 #     name: "padding"
7778 #     type: "string"
7779 #     allowed_values {
7780 #       list {
7781 #         s: "SAME"
7782 #         s: "VALID"
7783 #       }
7784 #     }
7785 #   }
7786 #   attr {
7787 #     name: "data_format"
7788 #     type: "string"
7789 #     default_value {
7790 #       s: "NHWC"
7791 #     }
7792 #     allowed_values {
7793 #       list {
7794 #         s: "NHWC"
7795 #         s: "NCHW"
7796 #       }
7797 #     }
7798 #   }
7799 #   attr {
7800 #     name: "T"
7801 #     type: "type"
7802 #     allowed_values {
7803 #       list {
7804 #         type: DT_HALF
7805 #         type: DT_BFLOAT16
7806 #         type: DT_FLOAT
7807 #         type: DT_DOUBLE
7808 #       }
7809 #     }
7810 #   }
7811 # }
7812 # op {
7813 #   name: "AvgPool3D"
7814 #   input_arg {
7815 #     name: "input"
7816 #     type_attr: "T"
7817 #   }
7818 #   output_arg {
7819 #     name: "output"
7820 #     type_attr: "T"
7821 #   }
7822 #   attr {
7823 #     name: "ksize"
7824 #     type: "list(int)"
7825 #     has_minimum: true
7826 #     minimum: 5
7827 #   }
7828 #   attr {
7829 #     name: "strides"
7830 #     type: "list(int)"
7831 #     has_minimum: true
7832 #     minimum: 5
7833 #   }
7834 #   attr {
7835 #     name: "padding"
7836 #     type: "string"
7837 #     allowed_values {
7838 #       list {
7839 #         s: "SAME"
7840 #         s: "VALID"
7841 #       }
7842 #     }
7843 #   }
7844 #   attr {
7845 #     name: "data_format"
7846 #     type: "string"
7847 #     default_value {
7848 #       s: "NDHWC"
7849 #     }
7850 #     allowed_values {
7851 #       list {
7852 #         s: "NDHWC"
7853 #         s: "NCDHW"
7854 #       }
7855 #     }
7856 #   }
7857 #   attr {
7858 #     name: "T"
7859 #     type: "type"
7860 #     allowed_values {
7861 #       list {
7862 #         type: DT_HALF
7863 #         type: DT_BFLOAT16
7864 #         type: DT_FLOAT
7865 #         type: DT_DOUBLE
7866 #       }
7867 #     }
7868 #   }
7869 # }
7870 # op {
7871 #   name: "AvgPool3DGrad"
7872 #   input_arg {
7873 #     name: "orig_input_shape"
7874 #     type: DT_INT32
7875 #   }
7876 #   input_arg {
7877 #     name: "grad"
7878 #     type_attr: "T"
7879 #   }
7880 #   output_arg {
7881 #     name: "output"
7882 #     type_attr: "T"
7883 #   }
7884 #   attr {
7885 #     name: "ksize"
7886 #     type: "list(int)"
7887 #     has_minimum: true
7888 #     minimum: 5
7889 #   }
7890 #   attr {
7891 #     name: "strides"
7892 #     type: "list(int)"
7893 #     has_minimum: true
7894 #     minimum: 5
7895 #   }
7896 #   attr {
7897 #     name: "padding"
7898 #     type: "string"
7899 #     allowed_values {
7900 #       list {
7901 #         s: "SAME"
7902 #         s: "VALID"
7903 #       }
7904 #     }
7905 #   }
7906 #   attr {
7907 #     name: "data_format"
7908 #     type: "string"
7909 #     default_value {
7910 #       s: "NDHWC"
7911 #     }
7912 #     allowed_values {
7913 #       list {
7914 #         s: "NDHWC"
7915 #         s: "NCDHW"
7916 #       }
7917 #     }
7918 #   }
7919 #   attr {
7920 #     name: "T"
7921 #     type: "type"
7922 #     allowed_values {
7923 #       list {
7924 #         type: DT_HALF
7925 #         type: DT_BFLOAT16
7926 #         type: DT_FLOAT
7927 #         type: DT_DOUBLE
7928 #       }
7929 #     }
7930 #   }
7931 # }
7932 # op {
7933 #   name: "AvgPoolGrad"
7934 #   input_arg {
7935 #     name: "orig_input_shape"
7936 #     type: DT_INT32
7937 #   }
7938 #   input_arg {
7939 #     name: "grad"
7940 #     type_attr: "T"
7941 #   }
7942 #   output_arg {
7943 #     name: "output"
7944 #     type_attr: "T"
7945 #   }
7946 #   attr {
7947 #     name: "ksize"
7948 #     type: "list(int)"
7949 #     has_minimum: true
7950 #     minimum: 4
7951 #   }
7952 #   attr {
7953 #     name: "strides"
7954 #     type: "list(int)"
7955 #     has_minimum: true
7956 #     minimum: 4
7957 #   }
7958 #   attr {
7959 #     name: "padding"
7960 #     type: "string"
7961 #     allowed_values {
7962 #       list {
7963 #         s: "SAME"
7964 #         s: "VALID"
7965 #       }
7966 #     }
7967 #   }
7968 #   attr {
7969 #     name: "data_format"
7970 #     type: "string"
7971 #     default_value {
7972 #       s: "NHWC"
7973 #     }
7974 #     allowed_values {
7975 #       list {
7976 #         s: "NHWC"
7977 #         s: "NCHW"
7978 #       }
7979 #     }
7980 #   }
7981 #   attr {
7982 #     name: "T"
7983 #     type: "type"
7984 #     allowed_values {
7985 #       list {
7986 #         type: DT_HALF
7987 #         type: DT_BFLOAT16
7988 #         type: DT_FLOAT
7989 #         type: DT_DOUBLE
7990 #       }
7991 #     }
7992 #   }
7993 # }
7994 # op {
7995 #   name: "BatchNormWithGlobalNormalization"
7996 #   input_arg {
7997 #     name: "t"
7998 #     type_attr: "T"
7999 #   }
8000 #   input_arg {
8001 #     name: "m"
8002 #     type_attr: "T"
8003 #   }
8004 #   input_arg {
8005 #     name: "v"
8006 #     type_attr: "T"
8007 #   }
8008 #   input_arg {
8009 #     name: "beta"
8010 #     type_attr: "T"
8011 #   }
8012 #   input_arg {
8013 #     name: "gamma"
8014 #     type_attr: "T"
8015 #   }
8016 #   output_arg {
8017 #     name: "result"
8018 #     type_attr: "T"
8019 #   }
8020 #   attr {
8021 #     name: "T"
8022 #     type: "type"
8023 #     allowed_values {
8024 #       list {
8025 #         type: DT_FLOAT
8026 #         type: DT_DOUBLE
8027 #         type: DT_INT32
8028 #         type: DT_UINT8
8029 #         type: DT_INT16
8030 #         type: DT_INT8
8031 #         type: DT_COMPLEX64
8032 #         type: DT_INT64
8033 #         type: DT_QINT8
8034 #         type: DT_QUINT8
8035 #         type: DT_QINT32
8036 #         type: DT_BFLOAT16
8037 #         type: DT_UINT16
8038 #         type: DT_COMPLEX128
8039 #         type: DT_HALF
8040 #         type: DT_UINT32
8041 #         type: DT_UINT64
8042 #       }
8043 #     }
8044 #   }
8045 #   attr {
8046 #     name: "variance_epsilon"
8047 #     type: "float"
8048 #   }
8049 #   attr {
8050 #     name: "scale_after_normalization"
8051 #     type: "bool"
8052 #   }
8053 #   deprecation {
8054 #     version: 9
8055 #     explanation: "Use tf.nn.batch_normalization()"
8056 #   }
8057 # }
8058 # op {
8059 #   name: "BatchNormWithGlobalNormalizationGrad"
8060 #   input_arg {
8061 #     name: "t"
8062 #     type_attr: "T"
8063 #   }
8064 #   input_arg {
8065 #     name: "m"
8066 #     type_attr: "T"
8067 #   }
8068 #   input_arg {
8069 #     name: "v"
8070 #     type_attr: "T"
8071 #   }
8072 #   input_arg {
8073 #     name: "gamma"
8074 #     type_attr: "T"
8075 #   }
8076 #   input_arg {
8077 #     name: "backprop"
8078 #     type_attr: "T"
8079 #   }
8080 #   output_arg {
8081 #     name: "dx"
8082 #     type_attr: "T"
8083 #   }
8084 #   output_arg {
8085 #     name: "dm"
8086 #     type_attr: "T"
8087 #   }
8088 #   output_arg {
8089 #     name: "dv"
8090 #     type_attr: "T"
8091 #   }
8092 #   output_arg {
8093 #     name: "db"
8094 #     type_attr: "T"
8095 #   }
8096 #   output_arg {
8097 #     name: "dg"
8098 #     type_attr: "T"
8099 #   }
8100 #   attr {
8101 #     name: "T"
8102 #     type: "type"
8103 #     allowed_values {
8104 #       list {
8105 #         type: DT_FLOAT
8106 #         type: DT_DOUBLE
8107 #         type: DT_INT32
8108 #         type: DT_UINT8
8109 #         type: DT_INT16
8110 #         type: DT_INT8
8111 #         type: DT_COMPLEX64
8112 #         type: DT_INT64
8113 #         type: DT_QINT8
8114 #         type: DT_QUINT8
8115 #         type: DT_QINT32
8116 #         type: DT_BFLOAT16
8117 #         type: DT_UINT16
8118 #         type: DT_COMPLEX128
8119 #         type: DT_HALF
8120 #         type: DT_UINT32
8121 #         type: DT_UINT64
8122 #       }
8123 #     }
8124 #   }
8125 #   attr {
8126 #     name: "variance_epsilon"
8127 #     type: "float"
8128 #   }
8129 #   attr {
8130 #     name: "scale_after_normalization"
8131 #     type: "bool"
8132 #   }
8133 #   deprecation {
8134 #     version: 9
8135 #     explanation: "Use tf.nn.batch_normalization()"
8136 #   }
8137 # }
8138 # op {
8139 #   name: "BiasAdd"
8140 #   input_arg {
8141 #     name: "value"
8142 #     type_attr: "T"
8143 #   }
8144 #   input_arg {
8145 #     name: "bias"
8146 #     type_attr: "T"
8147 #   }
8148 #   output_arg {
8149 #     name: "output"
8150 #     type_attr: "T"
8151 #   }
8152 #   attr {
8153 #     name: "T"
8154 #     type: "type"
8155 #     allowed_values {
8156 #       list {
8157 #         type: DT_FLOAT
8158 #         type: DT_DOUBLE
8159 #         type: DT_INT32
8160 #         type: DT_UINT8
8161 #         type: DT_INT16
8162 #         type: DT_INT8
8163 #         type: DT_COMPLEX64
8164 #         type: DT_INT64
8165 #         type: DT_QINT8
8166 #         type: DT_QUINT8
8167 #         type: DT_QINT32
8168 #         type: DT_BFLOAT16
8169 #         type: DT_UINT16
8170 #         type: DT_COMPLEX128
8171 #         type: DT_HALF
8172 #         type: DT_UINT32
8173 #         type: DT_UINT64
8174 #       }
8175 #     }
8176 #   }
8177 #   attr {
8178 #     name: "data_format"
8179 #     type: "string"
8180 #     default_value {
8181 #       s: "NHWC"
8182 #     }
8183 #     allowed_values {
8184 #       list {
8185 #         s: "NHWC"
8186 #         s: "NCHW"
8187 #       }
8188 #     }
8189 #   }
8190 # }
8191 # op {
8192 #   name: "BiasAddGrad"
8193 #   input_arg {
8194 #     name: "out_backprop"
8195 #     type_attr: "T"
8196 #   }
8197 #   output_arg {
8198 #     name: "output"
8199 #     type_attr: "T"
8200 #   }
8201 #   attr {
8202 #     name: "T"
8203 #     type: "type"
8204 #     allowed_values {
8205 #       list {
8206 #         type: DT_FLOAT
8207 #         type: DT_DOUBLE
8208 #         type: DT_INT32
8209 #         type: DT_UINT8
8210 #         type: DT_INT16
8211 #         type: DT_INT8
8212 #         type: DT_COMPLEX64
8213 #         type: DT_INT64
8214 #         type: DT_QINT8
8215 #         type: DT_QUINT8
8216 #         type: DT_QINT32
8217 #         type: DT_BFLOAT16
8218 #         type: DT_UINT16
8219 #         type: DT_COMPLEX128
8220 #         type: DT_HALF
8221 #         type: DT_UINT32
8222 #         type: DT_UINT64
8223 #       }
8224 #     }
8225 #   }
8226 #   attr {
8227 #     name: "data_format"
8228 #     type: "string"
8229 #     default_value {
8230 #       s: "NHWC"
8231 #     }
8232 #     allowed_values {
8233 #       list {
8234 #         s: "NHWC"
8235 #         s: "NCHW"
8236 #       }
8237 #     }
8238 #   }
8239 # }
8240 # op {
8241 #   name: "BiasAddV1"
8242 #   input_arg {
8243 #     name: "value"
8244 #     type_attr: "T"
8245 #   }
8246 #   input_arg {
8247 #     name: "bias"
8248 #     type_attr: "T"
8249 #   }
8250 #   output_arg {
8251 #     name: "output"
8252 #     type_attr: "T"
8253 #   }
8254 #   attr {
8255 #     name: "T"
8256 #     type: "type"
8257 #     allowed_values {
8258 #       list {
8259 #         type: DT_FLOAT
8260 #         type: DT_DOUBLE
8261 #         type: DT_INT32
8262 #         type: DT_UINT8
8263 #         type: DT_INT16
8264 #         type: DT_INT8
8265 #         type: DT_COMPLEX64
8266 #         type: DT_INT64
8267 #         type: DT_QINT8
8268 #         type: DT_QUINT8
8269 #         type: DT_QINT32
8270 #         type: DT_BFLOAT16
8271 #         type: DT_UINT16
8272 #         type: DT_COMPLEX128
8273 #         type: DT_HALF
8274 #         type: DT_UINT32
8275 #         type: DT_UINT64
8276 #       }
8277 #     }
8278 #   }
8279 # }
8280 # op {
8281 #   name: "Conv2D"
8282 #   input_arg {
8283 #     name: "input"
8284 #     type_attr: "T"
8285 #   }
8286 #   input_arg {
8287 #     name: "filter"
8288 #     type_attr: "T"
8289 #   }
8290 #   output_arg {
8291 #     name: "output"
8292 #     type_attr: "T"
8293 #   }
8294 #   attr {
8295 #     name: "T"
8296 #     type: "type"
8297 #     allowed_values {
8298 #       list {
8299 #         type: DT_HALF
8300 #         type: DT_BFLOAT16
8301 #         type: DT_FLOAT
8302 #         type: DT_DOUBLE
8303 #       }
8304 #     }
8305 #   }
8306 #   attr {
8307 #     name: "strides"
8308 #     type: "list(int)"
8309 #   }
8310 #   attr {
8311 #     name: "use_cudnn_on_gpu"
8312 #     type: "bool"
8313 #     default_value {
8314 #       b: true
8315 #     }
8316 #   }
8317 #   attr {
8318 #     name: "padding"
8319 #     type: "string"
8320 #     allowed_values {
8321 #       list {
8322 #         s: "SAME"
8323 #         s: "VALID"
8324 #       }
8325 #     }
8326 #   }
8327 #   attr {
8328 #     name: "data_format"
8329 #     type: "string"
8330 #     default_value {
8331 #       s: "NHWC"
8332 #     }
8333 #     allowed_values {
8334 #       list {
8335 #         s: "NHWC"
8336 #         s: "NCHW"
8337 #       }
8338 #     }
8339 #   }
8340 #   attr {
8341 #     name: "dilations"
8342 #     type: "list(int)"
8343 #     default_value {
8344 #       list {
8345 #         i: 1
8346 #         i: 1
8347 #         i: 1
8348 #         i: 1
8349 #       }
8350 #     }
8351 #   }
8352 # }
8353 # op {
8354 #   name: "Conv2DBackpropFilter"
8355 #   input_arg {
8356 #     name: "input"
8357 #     type_attr: "T"
8358 #   }
8359 #   input_arg {
8360 #     name: "filter_sizes"
8361 #     type: DT_INT32
8362 #   }
8363 #   input_arg {
8364 #     name: "out_backprop"
8365 #     type_attr: "T"
8366 #   }
8367 #   output_arg {
8368 #     name: "output"
8369 #     type_attr: "T"
8370 #   }
8371 #   attr {
8372 #     name: "T"
8373 #     type: "type"
8374 #     allowed_values {
8375 #       list {
8376 #         type: DT_HALF
8377 #         type: DT_BFLOAT16
8378 #         type: DT_FLOAT
8379 #         type: DT_DOUBLE
8380 #       }
8381 #     }
8382 #   }
8383 #   attr {
8384 #     name: "strides"
8385 #     type: "list(int)"
8386 #   }
8387 #   attr {
8388 #     name: "use_cudnn_on_gpu"
8389 #     type: "bool"
8390 #     default_value {
8391 #       b: true
8392 #     }
8393 #   }
8394 #   attr {
8395 #     name: "padding"
8396 #     type: "string"
8397 #     allowed_values {
8398 #       list {
8399 #         s: "SAME"
8400 #         s: "VALID"
8401 #       }
8402 #     }
8403 #   }
8404 #   attr {
8405 #     name: "data_format"
8406 #     type: "string"
8407 #     default_value {
8408 #       s: "NHWC"
8409 #     }
8410 #     allowed_values {
8411 #       list {
8412 #         s: "NHWC"
8413 #         s: "NCHW"
8414 #       }
8415 #     }
8416 #   }
8417 #   attr {
8418 #     name: "dilations"
8419 #     type: "list(int)"
8420 #     default_value {
8421 #       list {
8422 #         i: 1
8423 #         i: 1
8424 #         i: 1
8425 #         i: 1
8426 #       }
8427 #     }
8428 #   }
8429 # }
8430 # op {
8431 #   name: "Conv2DBackpropInput"
8432 #   input_arg {
8433 #     name: "input_sizes"
8434 #     type: DT_INT32
8435 #   }
8436 #   input_arg {
8437 #     name: "filter"
8438 #     type_attr: "T"
8439 #   }
8440 #   input_arg {
8441 #     name: "out_backprop"
8442 #     type_attr: "T"
8443 #   }
8444 #   output_arg {
8445 #     name: "output"
8446 #     type_attr: "T"
8447 #   }
8448 #   attr {
8449 #     name: "T"
8450 #     type: "type"
8451 #     allowed_values {
8452 #       list {
8453 #         type: DT_HALF
8454 #         type: DT_BFLOAT16
8455 #         type: DT_FLOAT
8456 #         type: DT_DOUBLE
8457 #       }
8458 #     }
8459 #   }
8460 #   attr {
8461 #     name: "strides"
8462 #     type: "list(int)"
8463 #   }
8464 #   attr {
8465 #     name: "use_cudnn_on_gpu"
8466 #     type: "bool"
8467 #     default_value {
8468 #       b: true
8469 #     }
8470 #   }
8471 #   attr {
8472 #     name: "padding"
8473 #     type: "string"
8474 #     allowed_values {
8475 #       list {
8476 #         s: "SAME"
8477 #         s: "VALID"
8478 #       }
8479 #     }
8480 #   }
8481 #   attr {
8482 #     name: "data_format"
8483 #     type: "string"
8484 #     default_value {
8485 #       s: "NHWC"
8486 #     }
8487 #     allowed_values {
8488 #       list {
8489 #         s: "NHWC"
8490 #         s: "NCHW"
8491 #       }
8492 #     }
8493 #   }
8494 #   attr {
8495 #     name: "dilations"
8496 #     type: "list(int)"
8497 #     default_value {
8498 #       list {
8499 #         i: 1
8500 #         i: 1
8501 #         i: 1
8502 #         i: 1
8503 #       }
8504 #     }
8505 #   }
8506 # }
8507 # op {
8508 #   name: "Conv3D"
8509 #   input_arg {
8510 #     name: "input"
8511 #     type_attr: "T"
8512 #   }
8513 #   input_arg {
8514 #     name: "filter"
8515 #     type_attr: "T"
8516 #   }
8517 #   output_arg {
8518 #     name: "output"
8519 #     type_attr: "T"
8520 #   }
8521 #   attr {
8522 #     name: "T"
8523 #     type: "type"
8524 #     allowed_values {
8525 #       list {
8526 #         type: DT_HALF
8527 #         type: DT_BFLOAT16
8528 #         type: DT_FLOAT
8529 #         type: DT_DOUBLE
8530 #       }
8531 #     }
8532 #   }
8533 #   attr {
8534 #     name: "strides"
8535 #     type: "list(int)"
8536 #     has_minimum: true
8537 #     minimum: 5
8538 #   }
8539 #   attr {
8540 #     name: "padding"
8541 #     type: "string"
8542 #     allowed_values {
8543 #       list {
8544 #         s: "SAME"
8545 #         s: "VALID"
8546 #       }
8547 #     }
8548 #   }
8549 #   attr {
8550 #     name: "data_format"
8551 #     type: "string"
8552 #     default_value {
8553 #       s: "NDHWC"
8554 #     }
8555 #     allowed_values {
8556 #       list {
8557 #         s: "NDHWC"
8558 #         s: "NCDHW"
8559 #       }
8560 #     }
8561 #   }
8562 #   attr {
8563 #     name: "dilations"
8564 #     type: "list(int)"
8565 #     default_value {
8566 #       list {
8567 #         i: 1
8568 #         i: 1
8569 #         i: 1
8570 #         i: 1
8571 #         i: 1
8572 #       }
8573 #     }
8574 #   }
8575 # }
8576 # op {
8577 #   name: "Conv3DBackpropFilter"
8578 #   input_arg {
8579 #     name: "input"
8580 #     type_attr: "T"
8581 #   }
8582 #   input_arg {
8583 #     name: "filter"
8584 #     type_attr: "T"
8585 #   }
8586 #   input_arg {
8587 #     name: "out_backprop"
8588 #     type_attr: "T"
8589 #   }
8590 #   output_arg {
8591 #     name: "output"
8592 #     type_attr: "T"
8593 #   }
8594 #   attr {
8595 #     name: "T"
8596 #     type: "type"
8597 #     allowed_values {
8598 #       list {
8599 #         type: DT_HALF
8600 #         type: DT_FLOAT
8601 #         type: DT_DOUBLE
8602 #       }
8603 #     }
8604 #   }
8605 #   attr {
8606 #     name: "strides"
8607 #     type: "list(int)"
8608 #     has_minimum: true
8609 #     minimum: 5
8610 #   }
8611 #   attr {
8612 #     name: "padding"
8613 #     type: "string"
8614 #     allowed_values {
8615 #       list {
8616 #         s: "SAME"
8617 #         s: "VALID"
8618 #       }
8619 #     }
8620 #   }
8621 #   attr {
8622 #     name: "dilations"
8623 #     type: "list(int)"
8624 #     default_value {
8625 #       list {
8626 #         i: 1
8627 #         i: 1
8628 #         i: 1
8629 #         i: 1
8630 #         i: 1
8631 #       }
8632 #     }
8633 #   }
8634 #   deprecation {
8635 #     version: 10
8636 #     explanation: "Use Conv3DBackpropFilterV2"
8637 #   }
8638 # }
8639 # op {
8640 #   name: "Conv3DBackpropFilterV2"
8641 #   input_arg {
8642 #     name: "input"
8643 #     type_attr: "T"
8644 #   }
8645 #   input_arg {
8646 #     name: "filter_sizes"
8647 #     type: DT_INT32
8648 #   }
8649 #   input_arg {
8650 #     name: "out_backprop"
8651 #     type_attr: "T"
8652 #   }
8653 #   output_arg {
8654 #     name: "output"
8655 #     type_attr: "T"
8656 #   }
8657 #   attr {
8658 #     name: "T"
8659 #     type: "type"
8660 #     allowed_values {
8661 #       list {
8662 #         type: DT_HALF
8663 #         type: DT_BFLOAT16
8664 #         type: DT_FLOAT
8665 #         type: DT_DOUBLE
8666 #       }
8667 #     }
8668 #   }
8669 #   attr {
8670 #     name: "strides"
8671 #     type: "list(int)"
8672 #     has_minimum: true
8673 #     minimum: 5
8674 #   }
8675 #   attr {
8676 #     name: "padding"
8677 #     type: "string"
8678 #     allowed_values {
8679 #       list {
8680 #         s: "SAME"
8681 #         s: "VALID"
8682 #       }
8683 #     }
8684 #   }
8685 #   attr {
8686 #     name: "data_format"
8687 #     type: "string"
8688 #     default_value {
8689 #       s: "NDHWC"
8690 #     }
8691 #     allowed_values {
8692 #       list {
8693 #         s: "NDHWC"
8694 #         s: "NCDHW"
8695 #       }
8696 #     }
8697 #   }
8698 #   attr {
8699 #     name: "dilations"
8700 #     type: "list(int)"
8701 #     default_value {
8702 #       list {
8703 #         i: 1
8704 #         i: 1
8705 #         i: 1
8706 #         i: 1
8707 #         i: 1
8708 #       }
8709 #     }
8710 #   }
8711 # }
8712 # op {
8713 #   name: "Conv3DBackpropInput"
8714 #   input_arg {
8715 #     name: "input"
8716 #     type_attr: "T"
8717 #   }
8718 #   input_arg {
8719 #     name: "filter"
8720 #     type_attr: "T"
8721 #   }
8722 #   input_arg {
8723 #     name: "out_backprop"
8724 #     type_attr: "T"
8725 #   }
8726 #   output_arg {
8727 #     name: "output"
8728 #     type_attr: "T"
8729 #   }
8730 #   attr {
8731 #     name: "T"
8732 #     type: "type"
8733 #     allowed_values {
8734 #       list {
8735 #         type: DT_HALF
8736 #         type: DT_FLOAT
8737 #         type: DT_DOUBLE
8738 #       }
8739 #     }
8740 #   }
8741 #   attr {
8742 #     name: "strides"
8743 #     type: "list(int)"
8744 #     has_minimum: true
8745 #     minimum: 5
8746 #   }
8747 #   attr {
8748 #     name: "padding"
8749 #     type: "string"
8750 #     allowed_values {
8751 #       list {
8752 #         s: "SAME"
8753 #         s: "VALID"
8754 #       }
8755 #     }
8756 #   }
8757 #   attr {
8758 #     name: "dilations"
8759 #     type: "list(int)"
8760 #     default_value {
8761 #       list {
8762 #         i: 1
8763 #         i: 1
8764 #         i: 1
8765 #         i: 1
8766 #         i: 1
8767 #       }
8768 #     }
8769 #   }
8770 #   deprecation {
8771 #     version: 10
8772 #     explanation: "Use Conv3DBackpropInputV2"
8773 #   }
8774 # }
8775 # op {
8776 #   name: "Conv3DBackpropInputV2"
8777 #   input_arg {
8778 #     name: "input_sizes"
8779 #     type_attr: "Tshape"
8780 #   }
8781 #   input_arg {
8782 #     name: "filter"
8783 #     type_attr: "T"
8784 #   }
8785 #   input_arg {
8786 #     name: "out_backprop"
8787 #     type_attr: "T"
8788 #   }
8789 #   output_arg {
8790 #     name: "output"
8791 #     type_attr: "T"
8792 #   }
8793 #   attr {
8794 #     name: "T"
8795 #     type: "type"
8796 #     allowed_values {
8797 #       list {
8798 #         type: DT_HALF
8799 #         type: DT_BFLOAT16
8800 #         type: DT_FLOAT
8801 #         type: DT_DOUBLE
8802 #       }
8803 #     }
8804 #   }
8805 #   attr {
8806 #     name: "strides"
8807 #     type: "list(int)"
8808 #     has_minimum: true
8809 #     minimum: 5
8810 #   }
8811 #   attr {
8812 #     name: "padding"
8813 #     type: "string"
8814 #     allowed_values {
8815 #       list {
8816 #         s: "SAME"
8817 #         s: "VALID"
8818 #       }
8819 #     }
8820 #   }
8821 #   attr {
8822 #     name: "data_format"
8823 #     type: "string"
8824 #     default_value {
8825 #       s: "NDHWC"
8826 #     }
8827 #     allowed_values {
8828 #       list {
8829 #         s: "NDHWC"
8830 #         s: "NCDHW"
8831 #       }
8832 #     }
8833 #   }
8834 #   attr {
8835 #     name: "dilations"
8836 #     type: "list(int)"
8837 #     default_value {
8838 #       list {
8839 #         i: 1
8840 #         i: 1
8841 #         i: 1
8842 #         i: 1
8843 #         i: 1
8844 #       }
8845 #     }
8846 #   }
8847 #   attr {
8848 #     name: "Tshape"
8849 #     type: "type"
8850 #     default_value {
8851 #       type: DT_INT32
8852 #     }
8853 #     allowed_values {
8854 #       list {
8855 #         type: DT_INT32
8856 #         type: DT_INT64
8857 #       }
8858 #     }
8859 #   }
8860 # }
8861 # op {
8862 #   name: "DataFormatDimMap"
8863 #   input_arg {
8864 #     name: "x"
8865 #     type_attr: "T"
8866 #   }
8867 #   output_arg {
8868 #     name: "y"
8869 #     type_attr: "T"
8870 #   }
8871 #   attr {
8872 #     name: "T"
8873 #     type: "type"
8874 #     default_value {
8875 #       type: DT_INT32
8876 #     }
8877 #     allowed_values {
8878 #       list {
8879 #         type: DT_INT32
8880 #         type: DT_INT64
8881 #       }
8882 #     }
8883 #   }
8884 #   attr {
8885 #     name: "src_format"
8886 #     type: "string"
8887 #     default_value {
8888 #       s: "NHWC"
8889 #     }
8890 #   }
8891 #   attr {
8892 #     name: "dst_format"
8893 #     type: "string"
8894 #     default_value {
8895 #       s: "NCHW"
8896 #     }
8897 #   }
8898 # }
8899 # op {
8900 #   name: "DataFormatVecPermute"
8901 #   input_arg {
8902 #     name: "x"
8903 #     type_attr: "T"
8904 #   }
8905 #   output_arg {
8906 #     name: "y"
8907 #     type_attr: "T"
8908 #   }
8909 #   attr {
8910 #     name: "T"
8911 #     type: "type"
8912 #     default_value {
8913 #       type: DT_INT32
8914 #     }
8915 #     allowed_values {
8916 #       list {
8917 #         type: DT_INT32
8918 #         type: DT_INT64
8919 #       }
8920 #     }
8921 #   }
8922 #   attr {
8923 #     name: "src_format"
8924 #     type: "string"
8925 #     default_value {
8926 #       s: "NHWC"
8927 #     }
8928 #   }
8929 #   attr {
8930 #     name: "dst_format"
8931 #     type: "string"
8932 #     default_value {
8933 #       s: "NCHW"
8934 #     }
8935 #   }
8936 # }
8937 # op {
8938 #   name: "DepthwiseConv2dNative"
8939 #   input_arg {
8940 #     name: "input"
8941 #     type_attr: "T"
8942 #   }
8943 #   input_arg {
8944 #     name: "filter"
8945 #     type_attr: "T"
8946 #   }
8947 #   output_arg {
8948 #     name: "output"
8949 #     type_attr: "T"
8950 #   }
8951 #   attr {
8952 #     name: "T"
8953 #     type: "type"
8954 #     allowed_values {
8955 #       list {
8956 #         type: DT_HALF
8957 #         type: DT_BFLOAT16
8958 #         type: DT_FLOAT
8959 #         type: DT_DOUBLE
8960 #       }
8961 #     }
8962 #   }
8963 #   attr {
8964 #     name: "strides"
8965 #     type: "list(int)"
8966 #   }
8967 #   attr {
8968 #     name: "padding"
8969 #     type: "string"
8970 #     allowed_values {
8971 #       list {
8972 #         s: "SAME"
8973 #         s: "VALID"
8974 #       }
8975 #     }
8976 #   }
8977 #   attr {
8978 #     name: "data_format"
8979 #     type: "string"
8980 #     default_value {
8981 #       s: "NHWC"
8982 #     }
8983 #     allowed_values {
8984 #       list {
8985 #         s: "NHWC"
8986 #         s: "NCHW"
8987 #       }
8988 #     }
8989 #   }
8990 #   attr {
8991 #     name: "dilations"
8992 #     type: "list(int)"
8993 #     default_value {
8994 #       list {
8995 #         i: 1
8996 #         i: 1
8997 #         i: 1
8998 #         i: 1
8999 #       }
9000 #     }
9001 #   }
9002 # }
9003 # op {
9004 #   name: "DepthwiseConv2dNativeBackpropFilter"
9005 #   input_arg {
9006 #     name: "input"
9007 #     type_attr: "T"
9008 #   }
9009 #   input_arg {
9010 #     name: "filter_sizes"
9011 #     type: DT_INT32
9012 #   }
9013 #   input_arg {
9014 #     name: "out_backprop"
9015 #     type_attr: "T"
9016 #   }
9017 #   output_arg {
9018 #     name: "output"
9019 #     type_attr: "T"
9020 #   }
9021 #   attr {
9022 #     name: "T"
9023 #     type: "type"
9024 #     allowed_values {
9025 #       list {
9026 #         type: DT_HALF
9027 #         type: DT_BFLOAT16
9028 #         type: DT_FLOAT
9029 #         type: DT_DOUBLE
9030 #       }
9031 #     }
9032 #   }
9033 #   attr {
9034 #     name: "strides"
9035 #     type: "list(int)"
9036 #   }
9037 #   attr {
9038 #     name: "padding"
9039 #     type: "string"
9040 #     allowed_values {
9041 #       list {
9042 #         s: "SAME"
9043 #         s: "VALID"
9044 #       }
9045 #     }
9046 #   }
9047 #   attr {
9048 #     name: "data_format"
9049 #     type: "string"
9050 #     default_value {
9051 #       s: "NHWC"
9052 #     }
9053 #     allowed_values {
9054 #       list {
9055 #         s: "NHWC"
9056 #         s: "NCHW"
9057 #       }
9058 #     }
9059 #   }
9060 #   attr {
9061 #     name: "dilations"
9062 #     type: "list(int)"
9063 #     default_value {
9064 #       list {
9065 #         i: 1
9066 #         i: 1
9067 #         i: 1
9068 #         i: 1
9069 #       }
9070 #     }
9071 #   }
9072 # }
9073 # op {
9074 #   name: "DepthwiseConv2dNativeBackpropInput"
9075 #   input_arg {
9076 #     name: "input_sizes"
9077 #     type: DT_INT32
9078 #   }
9079 #   input_arg {
9080 #     name: "filter"
9081 #     type_attr: "T"
9082 #   }
9083 #   input_arg {
9084 #     name: "out_backprop"
9085 #     type_attr: "T"
9086 #   }
9087 #   output_arg {
9088 #     name: "output"
9089 #     type_attr: "T"
9090 #   }
9091 #   attr {
9092 #     name: "T"
9093 #     type: "type"
9094 #     allowed_values {
9095 #       list {
9096 #         type: DT_HALF
9097 #         type: DT_BFLOAT16
9098 #         type: DT_FLOAT
9099 #         type: DT_DOUBLE
9100 #       }
9101 #     }
9102 #   }
9103 #   attr {
9104 #     name: "strides"
9105 #     type: "list(int)"
9106 #   }
9107 #   attr {
9108 #     name: "padding"
9109 #     type: "string"
9110 #     allowed_values {
9111 #       list {
9112 #         s: "SAME"
9113 #         s: "VALID"
9114 #       }
9115 #     }
9116 #   }
9117 #   attr {
9118 #     name: "data_format"
9119 #     type: "string"
9120 #     default_value {
9121 #       s: "NHWC"
9122 #     }
9123 #     allowed_values {
9124 #       list {
9125 #         s: "NHWC"
9126 #         s: "NCHW"
9127 #       }
9128 #     }
9129 #   }
9130 #   attr {
9131 #     name: "dilations"
9132 #     type: "list(int)"
9133 #     default_value {
9134 #       list {
9135 #         i: 1
9136 #         i: 1
9137 #         i: 1
9138 #         i: 1
9139 #       }
9140 #     }
9141 #   }
9142 # }
9143 # op {
9144 #   name: "Dilation2D"
9145 #   input_arg {
9146 #     name: "input"
9147 #     type_attr: "T"
9148 #   }
9149 #   input_arg {
9150 #     name: "filter"
9151 #     type_attr: "T"
9152 #   }
9153 #   output_arg {
9154 #     name: "output"
9155 #     type_attr: "T"
9156 #   }
9157 #   attr {
9158 #     name: "T"
9159 #     type: "type"
9160 #     allowed_values {
9161 #       list {
9162 #         type: DT_FLOAT
9163 #         type: DT_DOUBLE
9164 #         type: DT_INT32
9165 #         type: DT_UINT8
9166 #         type: DT_INT16
9167 #         type: DT_INT8
9168 #         type: DT_INT64
9169 #         type: DT_BFLOAT16
9170 #         type: DT_UINT16
9171 #         type: DT_HALF
9172 #         type: DT_UINT32
9173 #         type: DT_UINT64
9174 #       }
9175 #     }
9176 #   }
9177 #   attr {
9178 #     name: "strides"
9179 #     type: "list(int)"
9180 #     has_minimum: true
9181 #     minimum: 4
9182 #   }
9183 #   attr {
9184 #     name: "rates"
9185 #     type: "list(int)"
9186 #     has_minimum: true
9187 #     minimum: 4
9188 #   }
9189 #   attr {
9190 #     name: "padding"
9191 #     type: "string"
9192 #     allowed_values {
9193 #       list {
9194 #         s: "SAME"
9195 #         s: "VALID"
9196 #       }
9197 #     }
9198 #   }
9199 # }
9200 # op {
9201 #   name: "Dilation2DBackpropFilter"
9202 #   input_arg {
9203 #     name: "input"
9204 #     type_attr: "T"
9205 #   }
9206 #   input_arg {
9207 #     name: "filter"
9208 #     type_attr: "T"
9209 #   }
9210 #   input_arg {
9211 #     name: "out_backprop"
9212 #     type_attr: "T"
9213 #   }
9214 #   output_arg {
9215 #     name: "filter_backprop"
9216 #     type_attr: "T"
9217 #   }
9218 #   attr {
9219 #     name: "T"
9220 #     type: "type"
9221 #     allowed_values {
9222 #       list {
9223 #         type: DT_FLOAT
9224 #         type: DT_DOUBLE
9225 #         type: DT_INT32
9226 #         type: DT_UINT8
9227 #         type: DT_INT16
9228 #         type: DT_INT8
9229 #         type: DT_INT64
9230 #         type: DT_BFLOAT16
9231 #         type: DT_UINT16
9232 #         type: DT_HALF
9233 #         type: DT_UINT32
9234 #         type: DT_UINT64
9235 #       }
9236 #     }
9237 #   }
9238 #   attr {
9239 #     name: "strides"
9240 #     type: "list(int)"
9241 #     has_minimum: true
9242 #     minimum: 4
9243 #   }
9244 #   attr {
9245 #     name: "rates"
9246 #     type: "list(int)"
9247 #     has_minimum: true
9248 #     minimum: 4
9249 #   }
9250 #   attr {
9251 #     name: "padding"
9252 #     type: "string"
9253 #     allowed_values {
9254 #       list {
9255 #         s: "SAME"
9256 #         s: "VALID"
9257 #       }
9258 #     }
9259 #   }
9260 # }
9261 # op {
9262 #   name: "Dilation2DBackpropInput"
9263 #   input_arg {
9264 #     name: "input"
9265 #     type_attr: "T"
9266 #   }
9267 #   input_arg {
9268 #     name: "filter"
9269 #     type_attr: "T"
9270 #   }
9271 #   input_arg {
9272 #     name: "out_backprop"
9273 #     type_attr: "T"
9274 #   }
9275 #   output_arg {
9276 #     name: "in_backprop"
9277 #     type_attr: "T"
9278 #   }
9279 #   attr {
9280 #     name: "T"
9281 #     type: "type"
9282 #     allowed_values {
9283 #       list {
9284 #         type: DT_FLOAT
9285 #         type: DT_DOUBLE
9286 #         type: DT_INT32
9287 #         type: DT_UINT8
9288 #         type: DT_INT16
9289 #         type: DT_INT8
9290 #         type: DT_INT64
9291 #         type: DT_BFLOAT16
9292 #         type: DT_UINT16
9293 #         type: DT_HALF
9294 #         type: DT_UINT32
9295 #         type: DT_UINT64
9296 #       }
9297 #     }
9298 #   }
9299 #   attr {
9300 #     name: "strides"
9301 #     type: "list(int)"
9302 #     has_minimum: true
9303 #     minimum: 4
9304 #   }
9305 #   attr {
9306 #     name: "rates"
9307 #     type: "list(int)"
9308 #     has_minimum: true
9309 #     minimum: 4
9310 #   }
9311 #   attr {
9312 #     name: "padding"
9313 #     type: "string"
9314 #     allowed_values {
9315 #       list {
9316 #         s: "SAME"
9317 #         s: "VALID"
9318 #       }
9319 #     }
9320 #   }
9321 # }
9322 # op {
9323 #   name: "Elu"
9324 #   input_arg {
9325 #     name: "features"
9326 #     type_attr: "T"
9327 #   }
9328 #   output_arg {
9329 #     name: "activations"
9330 #     type_attr: "T"
9331 #   }
9332 #   attr {
9333 #     name: "T"
9334 #     type: "type"
9335 #     allowed_values {
9336 #       list {
9337 #         type: DT_HALF
9338 #         type: DT_BFLOAT16
9339 #         type: DT_FLOAT
9340 #         type: DT_DOUBLE
9341 #       }
9342 #     }
9343 #   }
9344 # }
9345 # op {
9346 #   name: "EluGrad"
9347 #   input_arg {
9348 #     name: "gradients"
9349 #     type_attr: "T"
9350 #   }
9351 #   input_arg {
9352 #     name: "outputs"
9353 #     type_attr: "T"
9354 #   }
9355 #   output_arg {
9356 #     name: "backprops"
9357 #     type_attr: "T"
9358 #   }
9359 #   attr {
9360 #     name: "T"
9361 #     type: "type"
9362 #     allowed_values {
9363 #       list {
9364 #         type: DT_HALF
9365 #         type: DT_BFLOAT16
9366 #         type: DT_FLOAT
9367 #         type: DT_DOUBLE
9368 #       }
9369 #     }
9370 #   }
9371 # }
9372 # op {
9373 #   name: "FractionalAvgPool"
9374 #   input_arg {
9375 #     name: "value"
9376 #     type_attr: "T"
9377 #   }
9378 #   output_arg {
9379 #     name: "output"
9380 #     type_attr: "T"
9381 #   }
9382 #   output_arg {
9383 #     name: "row_pooling_sequence"
9384 #     type: DT_INT64
9385 #   }
9386 #   output_arg {
9387 #     name: "col_pooling_sequence"
9388 #     type: DT_INT64
9389 #   }
9390 #   attr {
9391 #     name: "pooling_ratio"
9392 #     type: "list(float)"
9393 #     has_minimum: true
9394 #     minimum: 4
9395 #   }
9396 #   attr {
9397 #     name: "pseudo_random"
9398 #     type: "bool"
9399 #     default_value {
9400 #       b: false
9401 #     }
9402 #   }
9403 #   attr {
9404 #     name: "overlapping"
9405 #     type: "bool"
9406 #     default_value {
9407 #       b: false
9408 #     }
9409 #   }
9410 #   attr {
9411 #     name: "deterministic"
9412 #     type: "bool"
9413 #     default_value {
9414 #       b: false
9415 #     }
9416 #   }
9417 #   attr {
9418 #     name: "seed"
9419 #     type: "int"
9420 #     default_value {
9421 #       i: 0
9422 #     }
9423 #   }
9424 #   attr {
9425 #     name: "seed2"
9426 #     type: "int"
9427 #     default_value {
9428 #       i: 0
9429 #     }
9430 #   }
9431 #   attr {
9432 #     name: "T"
9433 #     type: "type"
9434 #     allowed_values {
9435 #       list {
9436 #         type: DT_FLOAT
9437 #         type: DT_DOUBLE
9438 #         type: DT_INT32
9439 #         type: DT_INT64
9440 #       }
9441 #     }
9442 #   }
9443 # }
9444 # op {
9445 #   name: "FractionalAvgPoolGrad"
9446 #   input_arg {
9447 #     name: "orig_input_tensor_shape"
9448 #     type: DT_INT64
9449 #   }
9450 #   input_arg {
9451 #     name: "out_backprop"
9452 #     type_attr: "T"
9453 #   }
9454 #   input_arg {
9455 #     name: "row_pooling_sequence"
9456 #     type: DT_INT64
9457 #   }
9458 #   input_arg {
9459 #     name: "col_pooling_sequence"
9460 #     type: DT_INT64
9461 #   }
9462 #   output_arg {
9463 #     name: "output"
9464 #     type_attr: "T"
9465 #   }
9466 #   attr {
9467 #     name: "overlapping"
9468 #     type: "bool"
9469 #     default_value {
9470 #       b: false
9471 #     }
9472 #   }
9473 #   attr {
9474 #     name: "T"
9475 #     type: "type"
9476 #     allowed_values {
9477 #       list {
9478 #         type: DT_FLOAT
9479 #         type: DT_DOUBLE
9480 #         type: DT_INT32
9481 #         type: DT_INT64
9482 #       }
9483 #     }
9484 #   }
9485 # }
9486 # op {
9487 #   name: "FractionalMaxPool"
9488 #   input_arg {
9489 #     name: "value"
9490 #     type_attr: "T"
9491 #   }
9492 #   output_arg {
9493 #     name: "output"
9494 #     type_attr: "T"
9495 #   }
9496 #   output_arg {
9497 #     name: "row_pooling_sequence"
9498 #     type: DT_INT64
9499 #   }
9500 #   output_arg {
9501 #     name: "col_pooling_sequence"
9502 #     type: DT_INT64
9503 #   }
9504 #   attr {
9505 #     name: "pooling_ratio"
9506 #     type: "list(float)"
9507 #     has_minimum: true
9508 #     minimum: 4
9509 #   }
9510 #   attr {
9511 #     name: "pseudo_random"
9512 #     type: "bool"
9513 #     default_value {
9514 #       b: false
9515 #     }
9516 #   }
9517 #   attr {
9518 #     name: "overlapping"
9519 #     type: "bool"
9520 #     default_value {
9521 #       b: false
9522 #     }
9523 #   }
9524 #   attr {
9525 #     name: "deterministic"
9526 #     type: "bool"
9527 #     default_value {
9528 #       b: false
9529 #     }
9530 #   }
9531 #   attr {
9532 #     name: "seed"
9533 #     type: "int"
9534 #     default_value {
9535 #       i: 0
9536 #     }
9537 #   }
9538 #   attr {
9539 #     name: "seed2"
9540 #     type: "int"
9541 #     default_value {
9542 #       i: 0
9543 #     }
9544 #   }
9545 #   attr {
9546 #     name: "T"
9547 #     type: "type"
9548 #     allowed_values {
9549 #       list {
9550 #         type: DT_FLOAT
9551 #         type: DT_DOUBLE
9552 #         type: DT_INT32
9553 #         type: DT_INT64
9554 #       }
9555 #     }
9556 #   }
9557 # }
9558 # op {
9559 #   name: "FractionalMaxPoolGrad"
9560 #   input_arg {
9561 #     name: "orig_input"
9562 #     type_attr: "T"
9563 #   }
9564 #   input_arg {
9565 #     name: "orig_output"
9566 #     type_attr: "T"
9567 #   }
9568 #   input_arg {
9569 #     name: "out_backprop"
9570 #     type_attr: "T"
9571 #   }
9572 #   input_arg {
9573 #     name: "row_pooling_sequence"
9574 #     type: DT_INT64
9575 #   }
9576 #   input_arg {
9577 #     name: "col_pooling_sequence"
9578 #     type: DT_INT64
9579 #   }
9580 #   output_arg {
9581 #     name: "output"
9582 #     type_attr: "T"
9583 #   }
9584 #   attr {
9585 #     name: "overlapping"
9586 #     type: "bool"
9587 #     default_value {
9588 #       b: false
9589 #     }
9590 #   }
9591 #   attr {
9592 #     name: "T"
9593 #     type: "type"
9594 #     allowed_values {
9595 #       list {
9596 #         type: DT_FLOAT
9597 #         type: DT_DOUBLE
9598 #         type: DT_INT32
9599 #         type: DT_INT64
9600 #       }
9601 #     }
9602 #   }
9603 # }
9604 # op {
9605 #   name: "FusedBatchNorm"
9606 #   input_arg {
9607 #     name: "x"
9608 #     type_attr: "T"
9609 #   }
9610 #   input_arg {
9611 #     name: "scale"
9612 #     type_attr: "T"
9613 #   }
9614 #   input_arg {
9615 #     name: "offset"
9616 #     type_attr: "T"
9617 #   }
9618 #   input_arg {
9619 #     name: "mean"
9620 #     type_attr: "T"
9621 #   }
9622 #   input_arg {
9623 #     name: "variance"
9624 #     type_attr: "T"
9625 #   }
9626 #   output_arg {
9627 #     name: "y"
9628 #     type_attr: "T"
9629 #   }
9630 #   output_arg {
9631 #     name: "batch_mean"
9632 #     type_attr: "T"
9633 #   }
9634 #   output_arg {
9635 #     name: "batch_variance"
9636 #     type_attr: "T"
9637 #   }
9638 #   output_arg {
9639 #     name: "reserve_space_1"
9640 #     type_attr: "T"
9641 #   }
9642 #   output_arg {
9643 #     name: "reserve_space_2"
9644 #     type_attr: "T"
9645 #   }
9646 #   attr {
9647 #     name: "T"
9648 #     type: "type"
9649 #     allowed_values {
9650 #       list {
9651 #         type: DT_FLOAT
9652 #       }
9653 #     }
9654 #   }
9655 #   attr {
9656 #     name: "epsilon"
9657 #     type: "float"
9658 #     default_value {
9659 #       f: 0.0001
9660 #     }
9661 #   }
9662 #   attr {
9663 #     name: "data_format"
9664 #     type: "string"
9665 #     default_value {
9666 #       s: "NHWC"
9667 #     }
9668 #     allowed_values {
9669 #       list {
9670 #         s: "NHWC"
9671 #         s: "NCHW"
9672 #       }
9673 #     }
9674 #   }
9675 #   attr {
9676 #     name: "is_training"
9677 #     type: "bool"
9678 #     default_value {
9679 #       b: true
9680 #     }
9681 #   }
9682 # }
9683 # op {
9684 #   name: "FusedBatchNormGrad"
9685 #   input_arg {
9686 #     name: "y_backprop"
9687 #     type_attr: "T"
9688 #   }
9689 #   input_arg {
9690 #     name: "x"
9691 #     type_attr: "T"
9692 #   }
9693 #   input_arg {
9694 #     name: "scale"
9695 #     type_attr: "T"
9696 #   }
9697 #   input_arg {
9698 #     name: "reserve_space_1"
9699 #     type_attr: "T"
9700 #   }
9701 #   input_arg {
9702 #     name: "reserve_space_2"
9703 #     type_attr: "T"
9704 #   }
9705 #   output_arg {
9706 #     name: "x_backprop"
9707 #     type_attr: "T"
9708 #   }
9709 #   output_arg {
9710 #     name: "scale_backprop"
9711 #     type_attr: "T"
9712 #   }
9713 #   output_arg {
9714 #     name: "offset_backprop"
9715 #     type_attr: "T"
9716 #   }
9717 #   output_arg {
9718 #     name: "reserve_space_3"
9719 #     type_attr: "T"
9720 #   }
9721 #   output_arg {
9722 #     name: "reserve_space_4"
9723 #     type_attr: "T"
9724 #   }
9725 #   attr {
9726 #     name: "T"
9727 #     type: "type"
9728 #     allowed_values {
9729 #       list {
9730 #         type: DT_FLOAT
9731 #       }
9732 #     }
9733 #   }
9734 #   attr {
9735 #     name: "epsilon"
9736 #     type: "float"
9737 #     default_value {
9738 #       f: 0.0001
9739 #     }
9740 #   }
9741 #   attr {
9742 #     name: "data_format"
9743 #     type: "string"
9744 #     default_value {
9745 #       s: "NHWC"
9746 #     }
9747 #     allowed_values {
9748 #       list {
9749 #         s: "NHWC"
9750 #         s: "NCHW"
9751 #       }
9752 #     }
9753 #   }
9754 #   attr {
9755 #     name: "is_training"
9756 #     type: "bool"
9757 #     default_value {
9758 #       b: true
9759 #     }
9760 #   }
9761 # }
9762 # op {
9763 #   name: "FusedBatchNormGradV2"
9764 #   input_arg {
9765 #     name: "y_backprop"
9766 #     type_attr: "T"
9767 #   }
9768 #   input_arg {
9769 #     name: "x"
9770 #     type_attr: "T"
9771 #   }
9772 #   input_arg {
9773 #     name: "scale"
9774 #     type: DT_FLOAT
9775 #   }
9776 #   input_arg {
9777 #     name: "reserve_space_1"
9778 #     type_attr: "U"
9779 #   }
9780 #   input_arg {
9781 #     name: "reserve_space_2"
9782 #     type_attr: "U"
9783 #   }
9784 #   output_arg {
9785 #     name: "x_backprop"
9786 #     type_attr: "T"
9787 #   }
9788 #   output_arg {
9789 #     name: "scale_backprop"
9790 #     type_attr: "U"
9791 #   }
9792 #   output_arg {
9793 #     name: "offset_backprop"
9794 #     type_attr: "U"
9795 #   }
9796 #   output_arg {
9797 #     name: "reserve_space_3"
9798 #     type_attr: "U"
9799 #   }
9800 #   output_arg {
9801 #     name: "reserve_space_4"
9802 #     type_attr: "U"
9803 #   }
9804 #   attr {
9805 #     name: "T"
9806 #     type: "type"
9807 #     allowed_values {
9808 #       list {
9809 #         type: DT_HALF
9810 #         type: DT_BFLOAT16
9811 #         type: DT_FLOAT
9812 #       }
9813 #     }
9814 #   }
9815 #   attr {
9816 #     name: "U"
9817 #     type: "type"
9818 #     allowed_values {
9819 #       list {
9820 #         type: DT_FLOAT
9821 #       }
9822 #     }
9823 #   }
9824 #   attr {
9825 #     name: "epsilon"
9826 #     type: "float"
9827 #     default_value {
9828 #       f: 0.0001
9829 #     }
9830 #   }
9831 #   attr {
9832 #     name: "data_format"
9833 #     type: "string"
9834 #     default_value {
9835 #       s: "NHWC"
9836 #     }
9837 #     allowed_values {
9838 #       list {
9839 #         s: "NHWC"
9840 #         s: "NCHW"
9841 #       }
9842 #     }
9843 #   }
9844 #   attr {
9845 #     name: "is_training"
9846 #     type: "bool"
9847 #     default_value {
9848 #       b: true
9849 #     }
9850 #   }
9851 # }
9852 # op {
9853 #   name: "FusedBatchNormV2"
9854 #   input_arg {
9855 #     name: "x"
9856 #     type_attr: "T"
9857 #   }
9858 #   input_arg {
9859 #     name: "scale"
9860 #     type_attr: "U"
9861 #   }
9862 #   input_arg {
9863 #     name: "offset"
9864 #     type_attr: "U"
9865 #   }
9866 #   input_arg {
9867 #     name: "mean"
9868 #     type_attr: "U"
9869 #   }
9870 #   input_arg {
9871 #     name: "variance"
9872 #     type_attr: "U"
9873 #   }
9874 #   output_arg {
9875 #     name: "y"
9876 #     type_attr: "T"
9877 #   }
9878 #   output_arg {
9879 #     name: "batch_mean"
9880 #     type_attr: "U"
9881 #   }
9882 #   output_arg {
9883 #     name: "batch_variance"
9884 #     type_attr: "U"
9885 #   }
9886 #   output_arg {
9887 #     name: "reserve_space_1"
9888 #     type_attr: "U"
9889 #   }
9890 #   output_arg {
9891 #     name: "reserve_space_2"
9892 #     type_attr: "U"
9893 #   }
9894 #   attr {
9895 #     name: "T"
9896 #     type: "type"
9897 #     allowed_values {
9898 #       list {
9899 #         type: DT_HALF
9900 #         type: DT_BFLOAT16
9901 #         type: DT_FLOAT
9902 #       }
9903 #     }
9904 #   }
9905 #   attr {
9906 #     name: "U"
9907 #     type: "type"
9908 #     allowed_values {
9909 #       list {
9910 #         type: DT_FLOAT
9911 #       }
9912 #     }
9913 #   }
9914 #   attr {
9915 #     name: "epsilon"
9916 #     type: "float"
9917 #     default_value {
9918 #       f: 0.0001
9919 #     }
9920 #   }
9921 #   attr {
9922 #     name: "data_format"
9923 #     type: "string"
9924 #     default_value {
9925 #       s: "NHWC"
9926 #     }
9927 #     allowed_values {
9928 #       list {
9929 #         s: "NHWC"
9930 #         s: "NCHW"
9931 #       }
9932 #     }
9933 #   }
9934 #   attr {
9935 #     name: "is_training"
9936 #     type: "bool"
9937 #     default_value {
9938 #       b: true
9939 #     }
9940 #   }
9941 # }
9942 # op {
9943 #   name: "FusedPadConv2D"
9944 #   input_arg {
9945 #     name: "input"
9946 #     type_attr: "T"
9947 #   }
9948 #   input_arg {
9949 #     name: "paddings"
9950 #     type: DT_INT32
9951 #   }
9952 #   input_arg {
9953 #     name: "filter"
9954 #     type_attr: "T"
9955 #   }
9956 #   output_arg {
9957 #     name: "output"
9958 #     type_attr: "T"
9959 #   }
9960 #   attr {
9961 #     name: "T"
9962 #     type: "type"
9963 #     allowed_values {
9964 #       list {
9965 #         type: DT_HALF
9966 #         type: DT_FLOAT
9967 #         type: DT_DOUBLE
9968 #       }
9969 #     }
9970 #   }
9971 #   attr {
9972 #     name: "mode"
9973 #     type: "string"
9974 #     allowed_values {
9975 #       list {
9976 #         s: "REFLECT"
9977 #         s: "SYMMETRIC"
9978 #       }
9979 #     }
9980 #   }
9981 #   attr {
9982 #     name: "strides"
9983 #     type: "list(int)"
9984 #   }
9985 #   attr {
9986 #     name: "padding"
9987 #     type: "string"
9988 #     allowed_values {
9989 #       list {
9990 #         s: "SAME"
9991 #         s: "VALID"
9992 #       }
9993 #     }
9994 #   }
9995 # }
9996 # op {
9997 #   name: "FusedResizeAndPadConv2D"
9998 #   input_arg {
9999 #     name: "input"
10000 #     type_attr: "T"
10001 #   }
10002 #   input_arg {
10003 #     name: "size"
10004 #     type: DT_INT32
10005 #   }
10006 #   input_arg {
10007 #     name: "paddings"
10008 #     type: DT_INT32
10009 #   }
10010 #   input_arg {
10011 #     name: "filter"
10012 #     type_attr: "T"
10013 #   }
10014 #   output_arg {
10015 #     name: "output"
10016 #     type_attr: "T"
10017 #   }
10018 #   attr {
10019 #     name: "T"
10020 #     type: "type"
10021 #     allowed_values {
10022 #       list {
10023 #         type: DT_HALF
10024 #         type: DT_FLOAT
10025 #         type: DT_DOUBLE
10026 #       }
10027 #     }
10028 #   }
10029 #   attr {
10030 #     name: "resize_align_corners"
10031 #     type: "bool"
10032 #     default_value {
10033 #       b: false
10034 #     }
10035 #   }
10036 #   attr {
10037 #     name: "mode"
10038 #     type: "string"
10039 #     allowed_values {
10040 #       list {
10041 #         s: "REFLECT"
10042 #         s: "SYMMETRIC"
10043 #       }
10044 #     }
10045 #   }
10046 #   attr {
10047 #     name: "strides"
10048 #     type: "list(int)"
10049 #   }
10050 #   attr {
10051 #     name: "padding"
10052 #     type: "string"
10053 #     allowed_values {
10054 #       list {
10055 #         s: "SAME"
10056 #         s: "VALID"
10057 #       }
10058 #     }
10059 #   }
10060 # }
10061 # op {
10062 #   name: "InTopK"
10063 #   input_arg {
10064 #     name: "predictions"
10065 #     type: DT_FLOAT
10066 #   }
10067 #   input_arg {
10068 #     name: "targets"
10069 #     type_attr: "T"
10070 #   }
10071 #   output_arg {
10072 #     name: "precision"
10073 #     type: DT_BOOL
10074 #   }
10075 #   attr {
10076 #     name: "k"
10077 #     type: "int"
10078 #   }
10079 #   attr {
10080 #     name: "T"
10081 #     type: "type"
10082 #     default_value {
10083 #       type: DT_INT32
10084 #     }
10085 #     allowed_values {
10086 #       list {
10087 #         type: DT_INT32
10088 #         type: DT_INT64
10089 #       }
10090 #     }
10091 #   }
10092 # }
10093 # op {
10094 #   name: "InTopKV2"
10095 #   input_arg {
10096 #     name: "predictions"
10097 #     type: DT_FLOAT
10098 #   }
10099 #   input_arg {
10100 #     name: "targets"
10101 #     type_attr: "T"
10102 #   }
10103 #   input_arg {
10104 #     name: "k"
10105 #     type_attr: "T"
10106 #   }
10107 #   output_arg {
10108 #     name: "precision"
10109 #     type: DT_BOOL
10110 #   }
10111 #   attr {
10112 #     name: "T"
10113 #     type: "type"
10114 #     default_value {
10115 #       type: DT_INT32
10116 #     }
10117 #     allowed_values {
10118 #       list {
10119 #         type: DT_INT32
10120 #         type: DT_INT64
10121 #       }
10122 #     }
10123 #   }
10124 # }
10125 # op {
10126 #   name: "L2Loss"
10127 #   input_arg {
10128 #     name: "t"
10129 #     type_attr: "T"
10130 #   }
10131 #   output_arg {
10132 #     name: "output"
10133 #     type_attr: "T"
10134 #   }
10135 #   attr {
10136 #     name: "T"
10137 #     type: "type"
10138 #     allowed_values {
10139 #       list {
10140 #         type: DT_HALF
10141 #         type: DT_BFLOAT16
10142 #         type: DT_FLOAT
10143 #         type: DT_DOUBLE
10144 #       }
10145 #     }
10146 #   }
10147 # }
10148 # op {
10149 #   name: "LRN"
10150 #   input_arg {
10151 #     name: "input"
10152 #     type_attr: "T"
10153 #   }
10154 #   output_arg {
10155 #     name: "output"
10156 #     type_attr: "T"
10157 #   }
10158 #   attr {
10159 #     name: "depth_radius"
10160 #     type: "int"
10161 #     default_value {
10162 #       i: 5
10163 #     }
10164 #   }
10165 #   attr {
10166 #     name: "bias"
10167 #     type: "float"
10168 #     default_value {
10169 #       f: 1
10170 #     }
10171 #   }
10172 #   attr {
10173 #     name: "alpha"
10174 #     type: "float"
10175 #     default_value {
10176 #       f: 1
10177 #     }
10178 #   }
10179 #   attr {
10180 #     name: "beta"
10181 #     type: "float"
10182 #     default_value {
10183 #       f: 0.5
10184 #     }
10185 #   }
10186 #   attr {
10187 #     name: "T"
10188 #     type: "type"
10189 #     default_value {
10190 #       type: DT_FLOAT
10191 #     }
10192 #     allowed_values {
10193 #       list {
10194 #         type: DT_HALF
10195 #         type: DT_BFLOAT16
10196 #         type: DT_FLOAT
10197 #       }
10198 #     }
10199 #   }
10200 # }
10201 # op {
10202 #   name: "LRNGrad"
10203 #   input_arg {
10204 #     name: "input_grads"
10205 #     type_attr: "T"
10206 #   }
10207 #   input_arg {
10208 #     name: "input_image"
10209 #     type_attr: "T"
10210 #   }
10211 #   input_arg {
10212 #     name: "output_image"
10213 #     type_attr: "T"
10214 #   }
10215 #   output_arg {
10216 #     name: "output"
10217 #     type_attr: "T"
10218 #   }
10219 #   attr {
10220 #     name: "depth_radius"
10221 #     type: "int"
10222 #     default_value {
10223 #       i: 5
10224 #     }
10225 #   }
10226 #   attr {
10227 #     name: "bias"
10228 #     type: "float"
10229 #     default_value {
10230 #       f: 1
10231 #     }
10232 #   }
10233 #   attr {
10234 #     name: "alpha"
10235 #     type: "float"
10236 #     default_value {
10237 #       f: 1
10238 #     }
10239 #   }
10240 #   attr {
10241 #     name: "beta"
10242 #     type: "float"
10243 #     default_value {
10244 #       f: 0.5
10245 #     }
10246 #   }
10247 #   attr {
10248 #     name: "T"
10249 #     type: "type"
10250 #     default_value {
10251 #       type: DT_FLOAT
10252 #     }
10253 #     allowed_values {
10254 #       list {
10255 #         type: DT_HALF
10256 #         type: DT_BFLOAT16
10257 #         type: DT_FLOAT
10258 #       }
10259 #     }
10260 #   }
10261 # }
10262 # op {
10263 #   name: "LogSoftmax"
10264 #   input_arg {
10265 #     name: "logits"
10266 #     type_attr: "T"
10267 #   }
10268 #   output_arg {
10269 #     name: "logsoftmax"
10270 #     type_attr: "T"
10271 #   }
10272 #   attr {
10273 #     name: "T"
10274 #     type: "type"
10275 #     allowed_values {
10276 #       list {
10277 #         type: DT_HALF
10278 #         type: DT_BFLOAT16
10279 #         type: DT_FLOAT
10280 #         type: DT_DOUBLE
10281 #       }
10282 #     }
10283 #   }
10284 # }
10285 # op {
10286 #   name: "MaxPool"
10287 #   input_arg {
10288 #     name: "input"
10289 #     type_attr: "T"
10290 #   }
10291 #   output_arg {
10292 #     name: "output"
10293 #     type_attr: "T"
10294 #   }
10295 #   attr {
10296 #     name: "T"
10297 #     type: "type"
10298 #     default_value {
10299 #       type: DT_FLOAT
10300 #     }
10301 #     allowed_values {
10302 #       list {
10303 #         type: DT_HALF
10304 #         type: DT_BFLOAT16
10305 #         type: DT_FLOAT
10306 #         type: DT_DOUBLE
10307 #         type: DT_INT32
10308 #         type: DT_INT64
10309 #         type: DT_UINT8
10310 #         type: DT_INT16
10311 #         type: DT_INT8
10312 #         type: DT_UINT16
10313 #         type: DT_QINT8
10314 #       }
10315 #     }
10316 #   }
10317 #   attr {
10318 #     name: "ksize"
10319 #     type: "list(int)"
10320 #     has_minimum: true
10321 #     minimum: 4
10322 #   }
10323 #   attr {
10324 #     name: "strides"
10325 #     type: "list(int)"
10326 #     has_minimum: true
10327 #     minimum: 4
10328 #   }
10329 #   attr {
10330 #     name: "padding"
10331 #     type: "string"
10332 #     allowed_values {
10333 #       list {
10334 #         s: "SAME"
10335 #         s: "VALID"
10336 #       }
10337 #     }
10338 #   }
10339 #   attr {
10340 #     name: "data_format"
10341 #     type: "string"
10342 #     default_value {
10343 #       s: "NHWC"
10344 #     }
10345 #     allowed_values {
10346 #       list {
10347 #         s: "NHWC"
10348 #         s: "NCHW"
10349 #         s: "NCHW_VECT_C"
10350 #       }
10351 #     }
10352 #   }
10353 # }
10354 # op {
10355 #   name: "MaxPool3D"
10356 #   input_arg {
10357 #     name: "input"
10358 #     type_attr: "T"
10359 #   }
10360 #   output_arg {
10361 #     name: "output"
10362 #     type_attr: "T"
10363 #   }
10364 #   attr {
10365 #     name: "ksize"
10366 #     type: "list(int)"
10367 #     has_minimum: true
10368 #     minimum: 5
10369 #   }
10370 #   attr {
10371 #     name: "strides"
10372 #     type: "list(int)"
10373 #     has_minimum: true
10374 #     minimum: 5
10375 #   }
10376 #   attr {
10377 #     name: "padding"
10378 #     type: "string"
10379 #     allowed_values {
10380 #       list {
10381 #         s: "SAME"
10382 #         s: "VALID"
10383 #       }
10384 #     }
10385 #   }
10386 #   attr {
10387 #     name: "data_format"
10388 #     type: "string"
10389 #     default_value {
10390 #       s: "NDHWC"
10391 #     }
10392 #     allowed_values {
10393 #       list {
10394 #         s: "NDHWC"
10395 #         s: "NCDHW"
10396 #       }
10397 #     }
10398 #   }
10399 #   attr {
10400 #     name: "T"
10401 #     type: "type"
10402 #     allowed_values {
10403 #       list {
10404 #         type: DT_HALF
10405 #         type: DT_BFLOAT16
10406 #         type: DT_FLOAT
10407 #       }
10408 #     }
10409 #   }
10410 # }
10411 # op {
10412 #   name: "MaxPool3DGrad"
10413 #   input_arg {
10414 #     name: "orig_input"
10415 #     type_attr: "TInput"
10416 #   }
10417 #   input_arg {
10418 #     name: "orig_output"
10419 #     type_attr: "TInput"
10420 #   }
10421 #   input_arg {
10422 #     name: "grad"
10423 #     type_attr: "T"
10424 #   }
10425 #   output_arg {
10426 #     name: "output"
10427 #     type_attr: "T"
10428 #   }
10429 #   attr {
10430 #     name: "ksize"
10431 #     type: "list(int)"
10432 #     has_minimum: true
10433 #     minimum: 5
10434 #   }
10435 #   attr {
10436 #     name: "strides"
10437 #     type: "list(int)"
10438 #     has_minimum: true
10439 #     minimum: 5
10440 #   }
10441 #   attr {
10442 #     name: "padding"
10443 #     type: "string"
10444 #     allowed_values {
10445 #       list {
10446 #         s: "SAME"
10447 #         s: "VALID"
10448 #       }
10449 #     }
10450 #   }
10451 #   attr {
10452 #     name: "data_format"
10453 #     type: "string"
10454 #     default_value {
10455 #       s: "NDHWC"
10456 #     }
10457 #     allowed_values {
10458 #       list {
10459 #         s: "NDHWC"
10460 #         s: "NCDHW"
10461 #       }
10462 #     }
10463 #   }
10464 #   attr {
10465 #     name: "T"
10466 #     type: "type"
10467 #     default_value {
10468 #       type: DT_FLOAT
10469 #     }
10470 #     allowed_values {
10471 #       list {
10472 #         type: DT_HALF
10473 #         type: DT_BFLOAT16
10474 #         type: DT_FLOAT
10475 #       }
10476 #     }
10477 #   }
10478 #   attr {
10479 #     name: "TInput"
10480 #     type: "type"
10481 #     default_value {
10482 #       type: DT_FLOAT
10483 #     }
10484 #     allowed_values {
10485 #       list {
10486 #         type: DT_HALF
10487 #         type: DT_BFLOAT16
10488 #         type: DT_FLOAT
10489 #       }
10490 #     }
10491 #   }
10492 # }
10493 # op {
10494 #   name: "MaxPool3DGradGrad"
10495 #   input_arg {
10496 #     name: "orig_input"
10497 #     type_attr: "T"
10498 #   }
10499 #   input_arg {
10500 #     name: "orig_output"
10501 #     type_attr: "T"
10502 #   }
10503 #   input_arg {
10504 #     name: "grad"
10505 #     type_attr: "T"
10506 #   }
10507 #   output_arg {
10508 #     name: "output"
10509 #     type_attr: "T"
10510 #   }
10511 #   attr {
10512 #     name: "ksize"
10513 #     type: "list(int)"
10514 #     has_minimum: true
10515 #     minimum: 5
10516 #   }
10517 #   attr {
10518 #     name: "strides"
10519 #     type: "list(int)"
10520 #     has_minimum: true
10521 #     minimum: 5
10522 #   }
10523 #   attr {
10524 #     name: "padding"
10525 #     type: "string"
10526 #     allowed_values {
10527 #       list {
10528 #         s: "SAME"
10529 #         s: "VALID"
10530 #       }
10531 #     }
10532 #   }
10533 #   attr {
10534 #     name: "data_format"
10535 #     type: "string"
10536 #     default_value {
10537 #       s: "NDHWC"
10538 #     }
10539 #     allowed_values {
10540 #       list {
10541 #         s: "NDHWC"
10542 #         s: "NCDHW"
10543 #       }
10544 #     }
10545 #   }
10546 #   attr {
10547 #     name: "T"
10548 #     type: "type"
10549 #     allowed_values {
10550 #       list {
10551 #         type: DT_FLOAT
10552 #         type: DT_DOUBLE
10553 #         type: DT_INT32
10554 #         type: DT_UINT8
10555 #         type: DT_INT16
10556 #         type: DT_INT8
10557 #         type: DT_INT64
10558 #         type: DT_BFLOAT16
10559 #         type: DT_UINT16
10560 #         type: DT_HALF
10561 #         type: DT_UINT32
10562 #         type: DT_UINT64
10563 #       }
10564 #     }
10565 #   }
10566 # }
10567 # op {
10568 #   name: "MaxPoolGrad"
10569 #   input_arg {
10570 #     name: "orig_input"
10571 #     type_attr: "T"
10572 #   }
10573 #   input_arg {
10574 #     name: "orig_output"
10575 #     type_attr: "T"
10576 #   }
10577 #   input_arg {
10578 #     name: "grad"
10579 #     type_attr: "T"
10580 #   }
10581 #   output_arg {
10582 #     name: "output"
10583 #     type_attr: "T"
10584 #   }
10585 #   attr {
10586 #     name: "ksize"
10587 #     type: "list(int)"
10588 #     has_minimum: true
10589 #     minimum: 4
10590 #   }
10591 #   attr {
10592 #     name: "strides"
10593 #     type: "list(int)"
10594 #     has_minimum: true
10595 #     minimum: 4
10596 #   }
10597 #   attr {
10598 #     name: "padding"
10599 #     type: "string"
10600 #     allowed_values {
10601 #       list {
10602 #         s: "SAME"
10603 #         s: "VALID"
10604 #       }
10605 #     }
10606 #   }
10607 #   attr {
10608 #     name: "data_format"
10609 #     type: "string"
10610 #     default_value {
10611 #       s: "NHWC"
10612 #     }
10613 #     allowed_values {
10614 #       list {
10615 #         s: "NHWC"
10616 #         s: "NCHW"
10617 #       }
10618 #     }
10619 #   }
10620 #   attr {
10621 #     name: "T"
10622 #     type: "type"
10623 #     default_value {
10624 #       type: DT_FLOAT
10625 #     }
10626 #     allowed_values {
10627 #       list {
10628 #         type: DT_FLOAT
10629 #         type: DT_DOUBLE
10630 #         type: DT_INT32
10631 #         type: DT_UINT8
10632 #         type: DT_INT16
10633 #         type: DT_INT8
10634 #         type: DT_INT64
10635 #         type: DT_BFLOAT16
10636 #         type: DT_UINT16
10637 #         type: DT_HALF
10638 #         type: DT_UINT32
10639 #         type: DT_UINT64
10640 #       }
10641 #     }
10642 #   }
10643 # }
10644 # op {
10645 #   name: "MaxPoolGradGrad"
10646 #   input_arg {
10647 #     name: "orig_input"
10648 #     type_attr: "T"
10649 #   }
10650 #   input_arg {
10651 #     name: "orig_output"
10652 #     type_attr: "T"
10653 #   }
10654 #   input_arg {
10655 #     name: "grad"
10656 #     type_attr: "T"
10657 #   }
10658 #   output_arg {
10659 #     name: "output"
10660 #     type_attr: "T"
10661 #   }
10662 #   attr {
10663 #     name: "ksize"
10664 #     type: "list(int)"
10665 #     has_minimum: true
10666 #     minimum: 4
10667 #   }
10668 #   attr {
10669 #     name: "strides"
10670 #     type: "list(int)"
10671 #     has_minimum: true
10672 #     minimum: 4
10673 #   }
10674 #   attr {
10675 #     name: "padding"
10676 #     type: "string"
10677 #     allowed_values {
10678 #       list {
10679 #         s: "SAME"
10680 #         s: "VALID"
10681 #       }
10682 #     }
10683 #   }
10684 #   attr {
10685 #     name: "data_format"
10686 #     type: "string"
10687 #     default_value {
10688 #       s: "NHWC"
10689 #     }
10690 #     allowed_values {
10691 #       list {
10692 #         s: "NHWC"
10693 #         s: "NCHW"
10694 #       }
10695 #     }
10696 #   }
10697 #   attr {
10698 #     name: "T"
10699 #     type: "type"
10700 #     allowed_values {
10701 #       list {
10702 #         type: DT_FLOAT
10703 #         type: DT_DOUBLE
10704 #         type: DT_INT32
10705 #         type: DT_UINT8
10706 #         type: DT_INT16
10707 #         type: DT_INT8
10708 #         type: DT_INT64
10709 #         type: DT_BFLOAT16
10710 #         type: DT_UINT16
10711 #         type: DT_HALF
10712 #         type: DT_UINT32
10713 #         type: DT_UINT64
10714 #       }
10715 #     }
10716 #   }
10717 # }
10718 # op {
10719 #   name: "MaxPoolGradGradV2"
10720 #   input_arg {
10721 #     name: "orig_input"
10722 #     type_attr: "T"
10723 #   }
10724 #   input_arg {
10725 #     name: "orig_output"
10726 #     type_attr: "T"
10727 #   }
10728 #   input_arg {
10729 #     name: "grad"
10730 #     type_attr: "T"
10731 #   }
10732 #   input_arg {
10733 #     name: "ksize"
10734 #     type: DT_INT32
10735 #   }
10736 #   input_arg {
10737 #     name: "strides"
10738 #     type: DT_INT32
10739 #   }
10740 #   output_arg {
10741 #     name: "output"
10742 #     type_attr: "T"
10743 #   }
10744 #   attr {
10745 #     name: "padding"
10746 #     type: "string"
10747 #     allowed_values {
10748 #       list {
10749 #         s: "SAME"
10750 #         s: "VALID"
10751 #       }
10752 #     }
10753 #   }
10754 #   attr {
10755 #     name: "data_format"
10756 #     type: "string"
10757 #     default_value {
10758 #       s: "NHWC"
10759 #     }
10760 #     allowed_values {
10761 #       list {
10762 #         s: "NHWC"
10763 #         s: "NCHW"
10764 #       }
10765 #     }
10766 #   }
10767 #   attr {
10768 #     name: "T"
10769 #     type: "type"
10770 #     allowed_values {
10771 #       list {
10772 #         type: DT_FLOAT
10773 #         type: DT_DOUBLE
10774 #         type: DT_INT32
10775 #         type: DT_UINT8
10776 #         type: DT_INT16
10777 #         type: DT_INT8
10778 #         type: DT_INT64
10779 #         type: DT_BFLOAT16
10780 #         type: DT_UINT16
10781 #         type: DT_HALF
10782 #         type: DT_UINT32
10783 #         type: DT_UINT64
10784 #       }
10785 #     }
10786 #   }
10787 # }
10788 # op {
10789 #   name: "MaxPoolGradGradWithArgmax"
10790 #   input_arg {
10791 #     name: "input"
10792 #     type_attr: "T"
10793 #   }
10794 #   input_arg {
10795 #     name: "grad"
10796 #     type_attr: "T"
10797 #   }
10798 #   input_arg {
10799 #     name: "argmax"
10800 #     type_attr: "Targmax"
10801 #   }
10802 #   output_arg {
10803 #     name: "output"
10804 #     type_attr: "T"
10805 #   }
10806 #   attr {
10807 #     name: "ksize"
10808 #     type: "list(int)"
10809 #     has_minimum: true
10810 #     minimum: 4
10811 #   }
10812 #   attr {
10813 #     name: "strides"
10814 #     type: "list(int)"
10815 #     has_minimum: true
10816 #     minimum: 4
10817 #   }
10818 #   attr {
10819 #     name: "padding"
10820 #     type: "string"
10821 #     allowed_values {
10822 #       list {
10823 #         s: "SAME"
10824 #         s: "VALID"
10825 #       }
10826 #     }
10827 #   }
10828 #   attr {
10829 #     name: "Targmax"
10830 #     type: "type"
10831 #     allowed_values {
10832 #       list {
10833 #         type: DT_INT32
10834 #         type: DT_INT64
10835 #       }
10836 #     }
10837 #   }
10838 #   attr {
10839 #     name: "T"
10840 #     type: "type"
10841 #     allowed_values {
10842 #       list {
10843 #         type: DT_FLOAT
10844 #         type: DT_DOUBLE
10845 #         type: DT_INT32
10846 #         type: DT_UINT8
10847 #         type: DT_INT16
10848 #         type: DT_INT8
10849 #         type: DT_INT64
10850 #         type: DT_BFLOAT16
10851 #         type: DT_UINT16
10852 #         type: DT_HALF
10853 #         type: DT_UINT32
10854 #         type: DT_UINT64
10855 #       }
10856 #     }
10857 #   }
10858 # }
10859 # op {
10860 #   name: "MaxPoolGradV2"
10861 #   input_arg {
10862 #     name: "orig_input"
10863 #     type_attr: "T"
10864 #   }
10865 #   input_arg {
10866 #     name: "orig_output"
10867 #     type_attr: "T"
10868 #   }
10869 #   input_arg {
10870 #     name: "grad"
10871 #     type_attr: "T"
10872 #   }
10873 #   input_arg {
10874 #     name: "ksize"
10875 #     type: DT_INT32
10876 #   }
10877 #   input_arg {
10878 #     name: "strides"
10879 #     type: DT_INT32
10880 #   }
10881 #   output_arg {
10882 #     name: "output"
10883 #     type_attr: "T"
10884 #   }
10885 #   attr {
10886 #     name: "padding"
10887 #     type: "string"
10888 #     allowed_values {
10889 #       list {
10890 #         s: "SAME"
10891 #         s: "VALID"
10892 #       }
10893 #     }
10894 #   }
10895 #   attr {
10896 #     name: "data_format"
10897 #     type: "string"
10898 #     default_value {
10899 #       s: "NHWC"
10900 #     }
10901 #     allowed_values {
10902 #       list {
10903 #         s: "NHWC"
10904 #         s: "NCHW"
10905 #       }
10906 #     }
10907 #   }
10908 #   attr {
10909 #     name: "T"
10910 #     type: "type"
10911 #     default_value {
10912 #       type: DT_FLOAT
10913 #     }
10914 #     allowed_values {
10915 #       list {
10916 #         type: DT_FLOAT
10917 #         type: DT_DOUBLE
10918 #         type: DT_INT32
10919 #         type: DT_UINT8
10920 #         type: DT_INT16
10921 #         type: DT_INT8
10922 #         type: DT_INT64
10923 #         type: DT_BFLOAT16
10924 #         type: DT_UINT16
10925 #         type: DT_HALF
10926 #         type: DT_UINT32
10927 #         type: DT_UINT64
10928 #       }
10929 #     }
10930 #   }
10931 # }
10932 # op {
10933 #   name: "MaxPoolGradWithArgmax"
10934 #   input_arg {
10935 #     name: "input"
10936 #     type_attr: "T"
10937 #   }
10938 #   input_arg {
10939 #     name: "grad"
10940 #     type_attr: "T"
10941 #   }
10942 #   input_arg {
10943 #     name: "argmax"
10944 #     type_attr: "Targmax"
10945 #   }
10946 #   output_arg {
10947 #     name: "output"
10948 #     type_attr: "T"
10949 #   }
10950 #   attr {
10951 #     name: "ksize"
10952 #     type: "list(int)"
10953 #     has_minimum: true
10954 #     minimum: 4
10955 #   }
10956 #   attr {
10957 #     name: "strides"
10958 #     type: "list(int)"
10959 #     has_minimum: true
10960 #     minimum: 4
10961 #   }
10962 #   attr {
10963 #     name: "padding"
10964 #     type: "string"
10965 #     allowed_values {
10966 #       list {
10967 #         s: "SAME"
10968 #         s: "VALID"
10969 #       }
10970 #     }
10971 #   }
10972 #   attr {
10973 #     name: "Targmax"
10974 #     type: "type"
10975 #     allowed_values {
10976 #       list {
10977 #         type: DT_INT32
10978 #         type: DT_INT64
10979 #       }
10980 #     }
10981 #   }
10982 #   attr {
10983 #     name: "T"
10984 #     type: "type"
10985 #     allowed_values {
10986 #       list {
10987 #         type: DT_FLOAT
10988 #         type: DT_DOUBLE
10989 #         type: DT_INT32
10990 #         type: DT_UINT8
10991 #         type: DT_INT16
10992 #         type: DT_INT8
10993 #         type: DT_INT64
10994 #         type: DT_BFLOAT16
10995 #         type: DT_UINT16
10996 #         type: DT_HALF
10997 #         type: DT_UINT32
10998 #         type: DT_UINT64
10999 #       }
11000 #     }
11001 #   }
11002 # }
11003 # op {
11004 #   name: "MaxPoolV2"
11005 #   input_arg {
11006 #     name: "input"
11007 #     type_attr: "T"
11008 #   }
11009 #   input_arg {
11010 #     name: "ksize"
11011 #     type: DT_INT32
11012 #   }
11013 #   input_arg {
11014 #     name: "strides"
11015 #     type: DT_INT32
11016 #   }
11017 #   output_arg {
11018 #     name: "output"
11019 #     type_attr: "T"
11020 #   }
11021 #   attr {
11022 #     name: "T"
11023 #     type: "type"
11024 #     default_value {
11025 #       type: DT_FLOAT
11026 #     }
11027 #     allowed_values {
11028 #       list {
11029 #         type: DT_HALF
11030 #         type: DT_BFLOAT16
11031 #         type: DT_FLOAT
11032 #         type: DT_DOUBLE
11033 #         type: DT_INT32
11034 #         type: DT_INT64
11035 #         type: DT_UINT8
11036 #         type: DT_INT16
11037 #         type: DT_INT8
11038 #         type: DT_UINT16
11039 #         type: DT_QINT8
11040 #       }
11041 #     }
11042 #   }
11043 #   attr {
11044 #     name: "padding"
11045 #     type: "string"
11046 #     allowed_values {
11047 #       list {
11048 #         s: "SAME"
11049 #         s: "VALID"
11050 #       }
11051 #     }
11052 #   }
11053 #   attr {
11054 #     name: "data_format"
11055 #     type: "string"
11056 #     default_value {
11057 #       s: "NHWC"
11058 #     }
11059 #     allowed_values {
11060 #       list {
11061 #         s: "NHWC"
11062 #         s: "NCHW"
11063 #         s: "NCHW_VECT_C"
11064 #       }
11065 #     }
11066 #   }
11067 # }
11068 # op {
11069 #   name: "MaxPoolWithArgmax"
11070 #   input_arg {
11071 #     name: "input"
11072 #     type_attr: "T"
11073 #   }
11074 #   output_arg {
11075 #     name: "output"
11076 #     type_attr: "T"
11077 #   }
11078 #   output_arg {
11079 #     name: "argmax"
11080 #     type_attr: "Targmax"
11081 #   }
11082 #   attr {
11083 #     name: "ksize"
11084 #     type: "list(int)"
11085 #     has_minimum: true
11086 #     minimum: 4
11087 #   }
11088 #   attr {
11089 #     name: "strides"
11090 #     type: "list(int)"
11091 #     has_minimum: true
11092 #     minimum: 4
11093 #   }
11094 #   attr {
11095 #     name: "Targmax"
11096 #     type: "type"
11097 #     default_value {
11098 #       type: DT_INT64
11099 #     }
11100 #     allowed_values {
11101 #       list {
11102 #         type: DT_INT32
11103 #         type: DT_INT64
11104 #       }
11105 #     }
11106 #   }
11107 #   attr {
11108 #     name: "padding"
11109 #     type: "string"
11110 #     allowed_values {
11111 #       list {
11112 #         s: "SAME"
11113 #         s: "VALID"
11114 #       }
11115 #     }
11116 #   }
11117 #   attr {
11118 #     name: "T"
11119 #     type: "type"
11120 #     allowed_values {
11121 #       list {
11122 #         type: DT_FLOAT
11123 #         type: DT_DOUBLE
11124 #         type: DT_INT32
11125 #         type: DT_UINT8
11126 #         type: DT_INT16
11127 #         type: DT_INT8
11128 #         type: DT_INT64
11129 #         type: DT_BFLOAT16
11130 #         type: DT_UINT16
11131 #         type: DT_HALF
11132 #         type: DT_UINT32
11133 #         type: DT_UINT64
11134 #       }
11135 #     }
11136 #   }
11137 # }
11138 # op {
11139 #   name: "NthElement"
11140 #   input_arg {
11141 #     name: "input"
11142 #     type_attr: "T"
11143 #   }
11144 #   input_arg {
11145 #     name: "n"
11146 #     type: DT_INT32
11147 #   }
11148 #   output_arg {
11149 #     name: "values"
11150 #     type_attr: "T"
11151 #   }
11152 #   attr {
11153 #     name: "reverse"
11154 #     type: "bool"
11155 #     default_value {
11156 #       b: false
11157 #     }
11158 #   }
11159 #   attr {
11160 #     name: "T"
11161 #     type: "type"
11162 #     allowed_values {
11163 #       list {
11164 #         type: DT_FLOAT
11165 #         type: DT_DOUBLE
11166 #         type: DT_INT32
11167 #         type: DT_UINT8
11168 #         type: DT_INT16
11169 #         type: DT_INT8
11170 #         type: DT_INT64
11171 #         type: DT_BFLOAT16
11172 #         type: DT_UINT16
11173 #         type: DT_HALF
11174 #         type: DT_UINT32
11175 #         type: DT_UINT64
11176 #       }
11177 #     }
11178 #   }
11179 # }
11180 # op {
11181 #   name: "QuantizedAvgPool"
11182 #   input_arg {
11183 #     name: "input"
11184 #     type_attr: "T"
11185 #   }
11186 #   input_arg {
11187 #     name: "min_input"
11188 #     type: DT_FLOAT
11189 #   }
11190 #   input_arg {
11191 #     name: "max_input"
11192 #     type: DT_FLOAT
11193 #   }
11194 #   output_arg {
11195 #     name: "output"
11196 #     type_attr: "T"
11197 #   }
11198 #   output_arg {
11199 #     name: "min_output"
11200 #     type: DT_FLOAT
11201 #   }
11202 #   output_arg {
11203 #     name: "max_output"
11204 #     type: DT_FLOAT
11205 #   }
11206 #   attr {
11207 #     name: "T"
11208 #     type: "type"
11209 #     allowed_values {
11210 #       list {
11211 #         type: DT_QINT8
11212 #         type: DT_QUINT8
11213 #         type: DT_QINT32
11214 #         type: DT_QINT16
11215 #         type: DT_QUINT16
11216 #       }
11217 #     }
11218 #   }
11219 #   attr {
11220 #     name: "ksize"
11221 #     type: "list(int)"
11222 #   }
11223 #   attr {
11224 #     name: "strides"
11225 #     type: "list(int)"
11226 #   }
11227 #   attr {
11228 #     name: "padding"
11229 #     type: "string"
11230 #     allowed_values {
11231 #       list {
11232 #         s: "SAME"
11233 #         s: "VALID"
11234 #       }
11235 #     }
11236 #   }
11237 # }
11238 # op {
11239 #   name: "QuantizedBatchNormWithGlobalNormalization"
11240 #   input_arg {
11241 #     name: "t"
11242 #     type_attr: "Tinput"
11243 #   }
11244 #   input_arg {
11245 #     name: "t_min"
11246 #     type: DT_FLOAT
11247 #   }
11248 #   input_arg {
11249 #     name: "t_max"
11250 #     type: DT_FLOAT
11251 #   }
11252 #   input_arg {
11253 #     name: "m"
11254 #     type_attr: "Tinput"
11255 #   }
11256 #   input_arg {
11257 #     name: "m_min"
11258 #     type: DT_FLOAT
11259 #   }
11260 #   input_arg {
11261 #     name: "m_max"
11262 #     type: DT_FLOAT
11263 #   }
11264 #   input_arg {
11265 #     name: "v"
11266 #     type_attr: "Tinput"
11267 #   }
11268 #   input_arg {
11269 #     name: "v_min"
11270 #     type: DT_FLOAT
11271 #   }
11272 #   input_arg {
11273 #     name: "v_max"
11274 #     type: DT_FLOAT
11275 #   }
11276 #   input_arg {
11277 #     name: "beta"
11278 #     type_attr: "Tinput"
11279 #   }
11280 #   input_arg {
11281 #     name: "beta_min"
11282 #     type: DT_FLOAT
11283 #   }
11284 #   input_arg {
11285 #     name: "beta_max"
11286 #     type: DT_FLOAT
11287 #   }
11288 #   input_arg {
11289 #     name: "gamma"
11290 #     type_attr: "Tinput"
11291 #   }
11292 #   input_arg {
11293 #     name: "gamma_min"
11294 #     type: DT_FLOAT
11295 #   }
11296 #   input_arg {
11297 #     name: "gamma_max"
11298 #     type: DT_FLOAT
11299 #   }
11300 #   output_arg {
11301 #     name: "result"
11302 #     type_attr: "out_type"
11303 #   }
11304 #   output_arg {
11305 #     name: "result_min"
11306 #     type: DT_FLOAT
11307 #   }
11308 #   output_arg {
11309 #     name: "result_max"
11310 #     type: DT_FLOAT
11311 #   }
11312 #   attr {
11313 #     name: "Tinput"
11314 #     type: "type"
11315 #     allowed_values {
11316 #       list {
11317 #         type: DT_QINT8
11318 #         type: DT_QUINT8
11319 #         type: DT_QINT32
11320 #         type: DT_QINT16
11321 #         type: DT_QUINT16
11322 #       }
11323 #     }
11324 #   }
11325 #   attr {
11326 #     name: "out_type"
11327 #     type: "type"
11328 #     allowed_values {
11329 #       list {
11330 #         type: DT_QINT8
11331 #         type: DT_QUINT8
11332 #         type: DT_QINT32
11333 #         type: DT_QINT16
11334 #         type: DT_QUINT16
11335 #       }
11336 #     }
11337 #   }
11338 #   attr {
11339 #     name: "variance_epsilon"
11340 #     type: "float"
11341 #   }
11342 #   attr {
11343 #     name: "scale_after_normalization"
11344 #     type: "bool"
11345 #   }
11346 # }
11347 # op {
11348 #   name: "QuantizedBiasAdd"
11349 #   input_arg {
11350 #     name: "input"
11351 #     type_attr: "T1"
11352 #   }
11353 #   input_arg {
11354 #     name: "bias"
11355 #     type_attr: "T2"
11356 #   }
11357 #   input_arg {
11358 #     name: "min_input"
11359 #     type: DT_FLOAT
11360 #   }
11361 #   input_arg {
11362 #     name: "max_input"
11363 #     type: DT_FLOAT
11364 #   }
11365 #   input_arg {
11366 #     name: "min_bias"
11367 #     type: DT_FLOAT
11368 #   }
11369 #   input_arg {
11370 #     name: "max_bias"
11371 #     type: DT_FLOAT
11372 #   }
11373 #   output_arg {
11374 #     name: "output"
11375 #     type_attr: "out_type"
11376 #   }
11377 #   output_arg {
11378 #     name: "min_out"
11379 #     type: DT_FLOAT
11380 #   }
11381 #   output_arg {
11382 #     name: "max_out"
11383 #     type: DT_FLOAT
11384 #   }
11385 #   attr {
11386 #     name: "T1"
11387 #     type: "type"
11388 #     allowed_values {
11389 #       list {
11390 #         type: DT_QINT8
11391 #         type: DT_QUINT8
11392 #         type: DT_QINT32
11393 #         type: DT_QINT16
11394 #         type: DT_QUINT16
11395 #       }
11396 #     }
11397 #   }
11398 #   attr {
11399 #     name: "T2"
11400 #     type: "type"
11401 #     allowed_values {
11402 #       list {
11403 #         type: DT_QINT8
11404 #         type: DT_QUINT8
11405 #         type: DT_QINT32
11406 #         type: DT_QINT16
11407 #         type: DT_QUINT16
11408 #       }
11409 #     }
11410 #   }
11411 #   attr {
11412 #     name: "out_type"
11413 #     type: "type"
11414 #     allowed_values {
11415 #       list {
11416 #         type: DT_QINT8
11417 #         type: DT_QUINT8
11418 #         type: DT_QINT32
11419 #         type: DT_QINT16
11420 #         type: DT_QUINT16
11421 #       }
11422 #     }
11423 #   }
11424 # }
11425 # op {
11426 #   name: "QuantizedConv2D"
11427 #   input_arg {
11428 #     name: "input"
11429 #     type_attr: "Tinput"
11430 #   }
11431 #   input_arg {
11432 #     name: "filter"
11433 #     type_attr: "Tfilter"
11434 #   }
11435 #   input_arg {
11436 #     name: "min_input"
11437 #     type: DT_FLOAT
11438 #   }
11439 #   input_arg {
11440 #     name: "max_input"
11441 #     type: DT_FLOAT
11442 #   }
11443 #   input_arg {
11444 #     name: "min_filter"
11445 #     type: DT_FLOAT
11446 #   }
11447 #   input_arg {
11448 #     name: "max_filter"
11449 #     type: DT_FLOAT
11450 #   }
11451 #   output_arg {
11452 #     name: "output"
11453 #     type_attr: "out_type"
11454 #   }
11455 #   output_arg {
11456 #     name: "min_output"
11457 #     type: DT_FLOAT
11458 #   }
11459 #   output_arg {
11460 #     name: "max_output"
11461 #     type: DT_FLOAT
11462 #   }
11463 #   attr {
11464 #     name: "Tinput"
11465 #     type: "type"
11466 #     allowed_values {
11467 #       list {
11468 #         type: DT_QINT8
11469 #         type: DT_QUINT8
11470 #         type: DT_QINT32
11471 #         type: DT_QINT16
11472 #         type: DT_QUINT16
11473 #       }
11474 #     }
11475 #   }
11476 #   attr {
11477 #     name: "Tfilter"
11478 #     type: "type"
11479 #     allowed_values {
11480 #       list {
11481 #         type: DT_QINT8
11482 #         type: DT_QUINT8
11483 #         type: DT_QINT32
11484 #         type: DT_QINT16
11485 #         type: DT_QUINT16
11486 #       }
11487 #     }
11488 #   }
11489 #   attr {
11490 #     name: "out_type"
11491 #     type: "type"
11492 #     default_value {
11493 #       type: DT_QINT32
11494 #     }
11495 #     allowed_values {
11496 #       list {
11497 #         type: DT_QINT8
11498 #         type: DT_QUINT8
11499 #         type: DT_QINT32
11500 #         type: DT_QINT16
11501 #         type: DT_QUINT16
11502 #       }
11503 #     }
11504 #   }
11505 #   attr {
11506 #     name: "strides"
11507 #     type: "list(int)"
11508 #   }
11509 #   attr {
11510 #     name: "padding"
11511 #     type: "string"
11512 #     allowed_values {
11513 #       list {
11514 #         s: "SAME"
11515 #         s: "VALID"
11516 #       }
11517 #     }
11518 #   }
11519 #   attr {
11520 #     name: "dilations"
11521 #     type: "list(int)"
11522 #     default_value {
11523 #       list {
11524 #         i: 1
11525 #         i: 1
11526 #         i: 1
11527 #         i: 1
11528 #       }
11529 #     }
11530 #   }
11531 # }
11532 # op {
11533 #   name: "QuantizedMaxPool"
11534 #   input_arg {
11535 #     name: "input"
11536 #     type_attr: "T"
11537 #   }
11538 #   input_arg {
11539 #     name: "min_input"
11540 #     type: DT_FLOAT
11541 #   }
11542 #   input_arg {
11543 #     name: "max_input"
11544 #     type: DT_FLOAT
11545 #   }
11546 #   output_arg {
11547 #     name: "output"
11548 #     type_attr: "T"
11549 #   }
11550 #   output_arg {
11551 #     name: "min_output"
11552 #     type: DT_FLOAT
11553 #   }
11554 #   output_arg {
11555 #     name: "max_output"
11556 #     type: DT_FLOAT
11557 #   }
11558 #   attr {
11559 #     name: "T"
11560 #     type: "type"
11561 #     allowed_values {
11562 #       list {
11563 #         type: DT_QINT8
11564 #         type: DT_QUINT8
11565 #         type: DT_QINT32
11566 #         type: DT_QINT16
11567 #         type: DT_QUINT16
11568 #       }
11569 #     }
11570 #   }
11571 #   attr {
11572 #     name: "ksize"
11573 #     type: "list(int)"
11574 #   }
11575 #   attr {
11576 #     name: "strides"
11577 #     type: "list(int)"
11578 #   }
11579 #   attr {
11580 #     name: "padding"
11581 #     type: "string"
11582 #     allowed_values {
11583 #       list {
11584 #         s: "SAME"
11585 #         s: "VALID"
11586 #       }
11587 #     }
11588 #   }
11589 # }
11590 # op {
11591 #   name: "QuantizedRelu"
11592 #   input_arg {
11593 #     name: "features"
11594 #     type_attr: "Tinput"
11595 #   }
11596 #   input_arg {
11597 #     name: "min_features"
11598 #     type: DT_FLOAT
11599 #   }
11600 #   input_arg {
11601 #     name: "max_features"
11602 #     type: DT_FLOAT
11603 #   }
11604 #   output_arg {
11605 #     name: "activations"
11606 #     type_attr: "out_type"
11607 #   }
11608 #   output_arg {
11609 #     name: "min_activations"
11610 #     type: DT_FLOAT
11611 #   }
11612 #   output_arg {
11613 #     name: "max_activations"
11614 #     type: DT_FLOAT
11615 #   }
11616 #   attr {
11617 #     name: "Tinput"
11618 #     type: "type"
11619 #     allowed_values {
11620 #       list {
11621 #         type: DT_QINT8
11622 #         type: DT_QUINT8
11623 #         type: DT_QINT32
11624 #         type: DT_QINT16
11625 #         type: DT_QUINT16
11626 #       }
11627 #     }
11628 #   }
11629 #   attr {
11630 #     name: "out_type"
11631 #     type: "type"
11632 #     default_value {
11633 #       type: DT_QUINT8
11634 #     }
11635 #     allowed_values {
11636 #       list {
11637 #         type: DT_QINT8
11638 #         type: DT_QUINT8
11639 #         type: DT_QINT32
11640 #         type: DT_QINT16
11641 #         type: DT_QUINT16
11642 #       }
11643 #     }
11644 #   }
11645 # }
11646 # op {
11647 #   name: "QuantizedRelu6"
11648 #   input_arg {
11649 #     name: "features"
11650 #     type_attr: "Tinput"
11651 #   }
11652 #   input_arg {
11653 #     name: "min_features"
11654 #     type: DT_FLOAT
11655 #   }
11656 #   input_arg {
11657 #     name: "max_features"
11658 #     type: DT_FLOAT
11659 #   }
11660 #   output_arg {
11661 #     name: "activations"
11662 #     type_attr: "out_type"
11663 #   }
11664 #   output_arg {
11665 #     name: "min_activations"
11666 #     type: DT_FLOAT
11667 #   }
11668 #   output_arg {
11669 #     name: "max_activations"
11670 #     type: DT_FLOAT
11671 #   }
11672 #   attr {
11673 #     name: "Tinput"
11674 #     type: "type"
11675 #     allowed_values {
11676 #       list {
11677 #         type: DT_QINT8
11678 #         type: DT_QUINT8
11679 #         type: DT_QINT32
11680 #         type: DT_QINT16
11681 #         type: DT_QUINT16
11682 #       }
11683 #     }
11684 #   }
11685 #   attr {
11686 #     name: "out_type"
11687 #     type: "type"
11688 #     default_value {
11689 #       type: DT_QUINT8
11690 #     }
11691 #     allowed_values {
11692 #       list {
11693 #         type: DT_QINT8
11694 #         type: DT_QUINT8
11695 #         type: DT_QINT32
11696 #         type: DT_QINT16
11697 #         type: DT_QUINT16
11698 #       }
11699 #     }
11700 #   }
11701 # }
11702 # op {
11703 #   name: "QuantizedReluX"
11704 #   input_arg {
11705 #     name: "features"
11706 #     type_attr: "Tinput"
11707 #   }
11708 #   input_arg {
11709 #     name: "max_value"
11710 #     type: DT_FLOAT
11711 #   }
11712 #   input_arg {
11713 #     name: "min_features"
11714 #     type: DT_FLOAT
11715 #   }
11716 #   input_arg {
11717 #     name: "max_features"
11718 #     type: DT_FLOAT
11719 #   }
11720 #   output_arg {
11721 #     name: "activations"
11722 #     type_attr: "out_type"
11723 #   }
11724 #   output_arg {
11725 #     name: "min_activations"
11726 #     type: DT_FLOAT
11727 #   }
11728 #   output_arg {
11729 #     name: "max_activations"
11730 #     type: DT_FLOAT
11731 #   }
11732 #   attr {
11733 #     name: "Tinput"
11734 #     type: "type"
11735 #     allowed_values {
11736 #       list {
11737 #         type: DT_QINT8
11738 #         type: DT_QUINT8
11739 #         type: DT_QINT32
11740 #         type: DT_QINT16
11741 #         type: DT_QUINT16
11742 #       }
11743 #     }
11744 #   }
11745 #   attr {
11746 #     name: "out_type"
11747 #     type: "type"
11748 #     default_value {
11749 #       type: DT_QUINT8
11750 #     }
11751 #     allowed_values {
11752 #       list {
11753 #         type: DT_QINT8
11754 #         type: DT_QUINT8
11755 #         type: DT_QINT32
11756 #         type: DT_QINT16
11757 #         type: DT_QUINT16
11758 #       }
11759 #     }
11760 #   }
11761 # }
11762 # op {
11763 #   name: "Relu"
11764 #   input_arg {
11765 #     name: "features"
11766 #     type_attr: "T"
11767 #   }
11768 #   output_arg {
11769 #     name: "activations"
11770 #     type_attr: "T"
11771 #   }
11772 #   attr {
11773 #     name: "T"
11774 #     type: "type"
11775 #     allowed_values {
11776 #       list {
11777 #         type: DT_FLOAT
11778 #         type: DT_DOUBLE
11779 #         type: DT_INT32
11780 #         type: DT_UINT8
11781 #         type: DT_INT16
11782 #         type: DT_INT8
11783 #         type: DT_INT64
11784 #         type: DT_BFLOAT16
11785 #         type: DT_UINT16
11786 #         type: DT_HALF
11787 #         type: DT_UINT32
11788 #         type: DT_UINT64
11789 #         type: DT_QINT8
11790 #       }
11791 #     }
11792 #   }
11793 # }
11794 # op {
11795 #   name: "Relu6"
11796 #   input_arg {
11797 #     name: "features"
11798 #     type_attr: "T"
11799 #   }
11800 #   output_arg {
11801 #     name: "activations"
11802 #     type_attr: "T"
11803 #   }
11804 #   attr {
11805 #     name: "T"
11806 #     type: "type"
11807 #     allowed_values {
11808 #       list {
11809 #         type: DT_FLOAT
11810 #         type: DT_DOUBLE
11811 #         type: DT_INT32
11812 #         type: DT_UINT8
11813 #         type: DT_INT16
11814 #         type: DT_INT8
11815 #         type: DT_INT64
11816 #         type: DT_BFLOAT16
11817 #         type: DT_UINT16
11818 #         type: DT_HALF
11819 #         type: DT_UINT32
11820 #         type: DT_UINT64
11821 #       }
11822 #     }
11823 #   }
11824 # }
11825 # op {
11826 #   name: "Relu6Grad"
11827 #   input_arg {
11828 #     name: "gradients"
11829 #     type_attr: "T"
11830 #   }
11831 #   input_arg {
11832 #     name: "features"
11833 #     type_attr: "T"
11834 #   }
11835 #   output_arg {
11836 #     name: "backprops"
11837 #     type_attr: "T"
11838 #   }
11839 #   attr {
11840 #     name: "T"
11841 #     type: "type"
11842 #     allowed_values {
11843 #       list {
11844 #         type: DT_FLOAT
11845 #         type: DT_DOUBLE
11846 #         type: DT_INT32
11847 #         type: DT_UINT8
11848 #         type: DT_INT16
11849 #         type: DT_INT8
11850 #         type: DT_INT64
11851 #         type: DT_BFLOAT16
11852 #         type: DT_UINT16
11853 #         type: DT_HALF
11854 #         type: DT_UINT32
11855 #         type: DT_UINT64
11856 #       }
11857 #     }
11858 #   }
11859 # }
11860 # op {
11861 #   name: "ReluGrad"
11862 #   input_arg {
11863 #     name: "gradients"
11864 #     type_attr: "T"
11865 #   }
11866 #   input_arg {
11867 #     name: "features"
11868 #     type_attr: "T"
11869 #   }
11870 #   output_arg {
11871 #     name: "backprops"
11872 #     type_attr: "T"
11873 #   }
11874 #   attr {
11875 #     name: "T"
11876 #     type: "type"
11877 #     allowed_values {
11878 #       list {
11879 #         type: DT_FLOAT
11880 #         type: DT_DOUBLE
11881 #         type: DT_INT32
11882 #         type: DT_UINT8
11883 #         type: DT_INT16
11884 #         type: DT_INT8
11885 #         type: DT_INT64
11886 #         type: DT_BFLOAT16
11887 #         type: DT_UINT16
11888 #         type: DT_HALF
11889 #         type: DT_UINT32
11890 #         type: DT_UINT64
11891 #       }
11892 #     }
11893 #   }
11894 # }
11895 # op {
11896 #   name: "Selu"
11897 #   input_arg {
11898 #     name: "features"
11899 #     type_attr: "T"
11900 #   }
11901 #   output_arg {
11902 #     name: "activations"
11903 #     type_attr: "T"
11904 #   }
11905 #   attr {
11906 #     name: "T"
11907 #     type: "type"
11908 #     allowed_values {
11909 #       list {
11910 #         type: DT_HALF
11911 #         type: DT_BFLOAT16
11912 #         type: DT_FLOAT
11913 #         type: DT_DOUBLE
11914 #       }
11915 #     }
11916 #   }
11917 # }
11918 # op {
11919 #   name: "SeluGrad"
11920 #   input_arg {
11921 #     name: "gradients"
11922 #     type_attr: "T"
11923 #   }
11924 #   input_arg {
11925 #     name: "outputs"
11926 #     type_attr: "T"
11927 #   }
11928 #   output_arg {
11929 #     name: "backprops"
11930 #     type_attr: "T"
11931 #   }
11932 #   attr {
11933 #     name: "T"
11934 #     type: "type"
11935 #     allowed_values {
11936 #       list {
11937 #         type: DT_HALF
11938 #         type: DT_BFLOAT16
11939 #         type: DT_FLOAT
11940 #         type: DT_DOUBLE
11941 #       }
11942 #     }
11943 #   }
11944 # }
11945 # op {
11946 #   name: "Softmax"
11947 #   input_arg {
11948 #     name: "logits"
11949 #     type_attr: "T"
11950 #   }
11951 #   output_arg {
11952 #     name: "softmax"
11953 #     type_attr: "T"
11954 #   }
11955 #   attr {
11956 #     name: "T"
11957 #     type: "type"
11958 #     allowed_values {
11959 #       list {
11960 #         type: DT_HALF
11961 #         type: DT_BFLOAT16
11962 #         type: DT_FLOAT
11963 #         type: DT_DOUBLE
11964 #       }
11965 #     }
11966 #   }
11967 # }
11968 # op {
11969 #   name: "SoftmaxCrossEntropyWithLogits"
11970 #   input_arg {
11971 #     name: "features"
11972 #     type_attr: "T"
11973 #   }
11974 #   input_arg {
11975 #     name: "labels"
11976 #     type_attr: "T"
11977 #   }
11978 #   output_arg {
11979 #     name: "loss"
11980 #     type_attr: "T"
11981 #   }
11982 #   output_arg {
11983 #     name: "backprop"
11984 #     type_attr: "T"
11985 #   }
11986 #   attr {
11987 #     name: "T"
11988 #     type: "type"
11989 #     allowed_values {
11990 #       list {
11991 #         type: DT_HALF
11992 #         type: DT_BFLOAT16
11993 #         type: DT_FLOAT
11994 #         type: DT_DOUBLE
11995 #       }
11996 #     }
11997 #   }
11998 # }
11999 # op {
12000 #   name: "Softplus"
12001 #   input_arg {
12002 #     name: "features"
12003 #     type_attr: "T"
12004 #   }
12005 #   output_arg {
12006 #     name: "activations"
12007 #     type_attr: "T"
12008 #   }
12009 #   attr {
12010 #     name: "T"
12011 #     type: "type"
12012 #     allowed_values {
12013 #       list {
12014 #         type: DT_HALF
12015 #         type: DT_BFLOAT16
12016 #         type: DT_FLOAT
12017 #         type: DT_DOUBLE
12018 #       }
12019 #     }
12020 #   }
12021 # }
12022 # op {
12023 #   name: "SoftplusGrad"
12024 #   input_arg {
12025 #     name: "gradients"
12026 #     type_attr: "T"
12027 #   }
12028 #   input_arg {
12029 #     name: "features"
12030 #     type_attr: "T"
12031 #   }
12032 #   output_arg {
12033 #     name: "backprops"
12034 #     type_attr: "T"
12035 #   }
12036 #   attr {
12037 #     name: "T"
12038 #     type: "type"
12039 #     allowed_values {
12040 #       list {
12041 #         type: DT_HALF
12042 #         type: DT_BFLOAT16
12043 #         type: DT_FLOAT
12044 #         type: DT_DOUBLE
12045 #       }
12046 #     }
12047 #   }
12048 # }
12049 # op {
12050 #   name: "Softsign"
12051 #   input_arg {
12052 #     name: "features"
12053 #     type_attr: "T"
12054 #   }
12055 #   output_arg {
12056 #     name: "activations"
12057 #     type_attr: "T"
12058 #   }
12059 #   attr {
12060 #     name: "T"
12061 #     type: "type"
12062 #     allowed_values {
12063 #       list {
12064 #         type: DT_HALF
12065 #         type: DT_BFLOAT16
12066 #         type: DT_FLOAT
12067 #         type: DT_DOUBLE
12068 #       }
12069 #     }
12070 #   }
12071 # }
12072 # op {
12073 #   name: "SoftsignGrad"
12074 #   input_arg {
12075 #     name: "gradients"
12076 #     type_attr: "T"
12077 #   }
12078 #   input_arg {
12079 #     name: "features"
12080 #     type_attr: "T"
12081 #   }
12082 #   output_arg {
12083 #     name: "backprops"
12084 #     type_attr: "T"
12085 #   }
12086 #   attr {
12087 #     name: "T"
12088 #     type: "type"
12089 #     allowed_values {
12090 #       list {
12091 #         type: DT_HALF
12092 #         type: DT_BFLOAT16
12093 #         type: DT_FLOAT
12094 #         type: DT_DOUBLE
12095 #       }
12096 #     }
12097 #   }
12098 # }
12099 # op {
12100 #   name: "SparseSoftmaxCrossEntropyWithLogits"
12101 #   input_arg {
12102 #     name: "features"
12103 #     type_attr: "T"
12104 #   }
12105 #   input_arg {
12106 #     name: "labels"
12107 #     type_attr: "Tlabels"
12108 #   }
12109 #   output_arg {
12110 #     name: "loss"
12111 #     type_attr: "T"
12112 #   }
12113 #   output_arg {
12114 #     name: "backprop"
12115 #     type_attr: "T"
12116 #   }
12117 #   attr {
12118 #     name: "T"
12119 #     type: "type"
12120 #     allowed_values {
12121 #       list {
12122 #         type: DT_HALF
12123 #         type: DT_BFLOAT16
12124 #         type: DT_FLOAT
12125 #         type: DT_DOUBLE
12126 #       }
12127 #     }
12128 #   }
12129 #   attr {
12130 #     name: "Tlabels"
12131 #     type: "type"
12132 #     default_value {
12133 #       type: DT_INT64
12134 #     }
12135 #     allowed_values {
12136 #       list {
12137 #         type: DT_INT32
12138 #         type: DT_INT64
12139 #       }
12140 #     }
12141 #   }
12142 # }
12143 # op {
12144 #   name: "TopK"
12145 #   input_arg {
12146 #     name: "input"
12147 #     type_attr: "T"
12148 #   }
12149 #   output_arg {
12150 #     name: "values"
12151 #     type_attr: "T"
12152 #   }
12153 #   output_arg {
12154 #     name: "indices"
12155 #     type: DT_INT32
12156 #   }
12157 #   attr {
12158 #     name: "k"
12159 #     type: "int"
12160 #     has_minimum: true
12161 #   }
12162 #   attr {
12163 #     name: "sorted"
12164 #     type: "bool"
12165 #     default_value {
12166 #       b: true
12167 #     }
12168 #   }
12169 #   attr {
12170 #     name: "T"
12171 #     type: "type"
12172 #     allowed_values {
12173 #       list {
12174 #         type: DT_FLOAT
12175 #         type: DT_DOUBLE
12176 #         type: DT_INT32
12177 #         type: DT_UINT8
12178 #         type: DT_INT16
12179 #         type: DT_INT8
12180 #         type: DT_INT64
12181 #         type: DT_BFLOAT16
12182 #         type: DT_UINT16
12183 #         type: DT_HALF
12184 #         type: DT_UINT32
12185 #         type: DT_UINT64
12186 #       }
12187 #     }
12188 #   }
12189 #   deprecation {
12190 #     version: 7
12191 #     explanation: "Use TopKV2 instead"
12192 #   }
12193 # }
12194 # op {
12195 #   name: "TopKV2"
12196 #   input_arg {
12197 #     name: "input"
12198 #     type_attr: "T"
12199 #   }
12200 #   input_arg {
12201 #     name: "k"
12202 #     type: DT_INT32
12203 #   }
12204 #   output_arg {
12205 #     name: "values"
12206 #     type_attr: "T"
12207 #   }
12208 #   output_arg {
12209 #     name: "indices"
12210 #     type: DT_INT32
12211 #   }
12212 #   attr {
12213 #     name: "sorted"
12214 #     type: "bool"
12215 #     default_value {
12216 #       b: true
12217 #     }
12218 #   }
12219 #   attr {
12220 #     name: "T"
12221 #     type: "type"
12222 #     allowed_values {
12223 #       list {
12224 #         type: DT_FLOAT
12225 #         type: DT_DOUBLE
12226 #         type: DT_INT32
12227 #         type: DT_UINT8
12228 #         type: DT_INT16
12229 #         type: DT_INT8
12230 #         type: DT_INT64
12231 #         type: DT_BFLOAT16
12232 #         type: DT_UINT16
12233 #         type: DT_HALF
12234 #         type: DT_UINT32
12235 #         type: DT_UINT64
12236 #       }
12237 #     }
12238 #   }
12239 # }
12240 _op_def_lib = _InitOpDefLibrary(b"\n\274\001\n\007AvgPool\022\n\n\005value\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\301\001\n\tAvgPool3D\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\005\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\332\001\n\rAvgPool3DGrad\022\024\n\020orig_input_shape\030\003\022\t\n\004grad\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\005\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\325\001\n\013AvgPoolGrad\022\024\n\020orig_input_shape\030\003\022\t\n\004grad\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\343\001\n BatchNormWithGlobalNormalization\022\006\n\001t\"\001T\022\006\n\001m\"\001T\022\006\n\001v\"\001T\022\t\n\004beta\"\001T\022\n\n\005gamma\"\001T\032\013\n\006result\"\001T\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\031\n\020variance_epsilon\022\005float\"!\n\031scale_after_normalization\022\004boolB#\010\t\022\037Use tf.nn.batch_normalization()\n\213\002\n$BatchNormWithGlobalNormalizationGrad\022\006\n\001t\"\001T\022\006\n\001m\"\001T\022\006\n\001v\"\001T\022\n\n\005gamma\"\001T\022\r\n\010backprop\"\001T\032\007\n\002dx\"\001T\032\007\n\002dm\"\001T\032\007\n\002dv\"\001T\032\007\n\002db\"\001T\032\007\n\002dg\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\031\n\020variance_epsilon\022\005float\"!\n\031scale_after_normalization\022\004boolB#\010\t\022\037Use tf.nn.batch_normalization()\n~\n\007BiasAdd\022\n\n\005value\"\001T\022\t\n\004bias\"\001T\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\n~\n\013BiasAddGrad\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\nQ\n\tBiasAddV1\022\n\n\005value\"\001T\022\t\n\004bias\"\001T\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n\354\001\n\006Conv2D\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\024\n\007strides\022\tlist(int)\"\034\n\020use_cudnn_on_gpu\022\004bool\032\002(\001\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\" 
\n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\222\002\n\024Conv2DBackpropFilter\022\n\n\005input\"\001T\022\020\n\014filter_sizes\030\003\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\024\n\007strides\022\tlist(int)\"\034\n\020use_cudnn_on_gpu\022\004bool\032\002(\001\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\221\002\n\023Conv2DBackpropInput\022\017\n\013input_sizes\030\003\022\013\n\006filter\"\001T\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\024\n\007strides\022\tlist(int)\"\034\n\020use_cudnn_on_gpu\022\004bool\032\002(\001\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\326\001\n\006Conv3D\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"!\n\tdilations\022\tlist(int)\032\t\n\007\032\005\001\001\001\001\001\n\344\001\n\024Conv3DBackpropFilter\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\022\n\001T\022\004type:\007\n\0052\003\023\001\002\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"!\n\tdilations\022\tlist(int)\032\t\n\007\032\005\001\001\001\001\001B\036\010\n\022\032Use 
Conv3DBackpropFilterV2\n\376\001\n\026Conv3DBackpropFilterV2\022\n\n\005input\"\001T\022\020\n\014filter_sizes\030\003\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"!\n\tdilations\022\tlist(int)\032\t\n\007\032\005\001\001\001\001\001\n\342\001\n\023Conv3DBackpropInput\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\022\n\001T\022\004type:\007\n\0052\003\023\001\002\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"!\n\tdilations\022\tlist(int)\032\t\n\007\032\005\001\001\001\001\001B\035\010\n\022\031Use Conv3DBackpropInputV2\n\237\002\n\025Conv3DBackpropInputV2\022\025\n\013input_sizes\"\006Tshape\022\013\n\006filter\"\001T\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"!\n\tdilations\022\tlist(int)\032\t\n\007\032\005\001\001\001\001\001\"\032\n\006Tshape\022\004type\032\0020\003:\006\n\0042\002\003\t\nu\n\020DataFormatDimMap\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\"\034\n\nsrc_format\022\006string\032\006\022\004NHWC\"\034\n\ndst_format\022\006string\032\006\022\004NCHW\ny\n\024DataFormatVecPermute\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\"\034\n\nsrc_format\022\006string\032\006\022\004NHWC\"\034\n\ndst_format\022\006string\032\006\022\004NCHW\n\335\001\n\025DepthwiseConv2dNative
\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\203\002\n#DepthwiseConv2dNativeBackpropFilter\022\n\n\005input\"\001T\022\020\n\014filter_sizes\030\003\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\202\002\n\"DepthwiseConv2dNativeBackpropInput\022\017\n\013input_sizes\030\003\022\013\n\006filter\"\001T\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\" 
\n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\245\001\n\nDilation2D\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\007strides\022\tlist(int)(\0010\004\"\026\n\005rates\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\317\001\n\030Dilation2DBackpropFilter\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\022\021\n\014out_backprop\"\001T\032\024\n\017filter_backprop\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\007strides\022\tlist(int)(\0010\004\"\026\n\005rates\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\312\001\n\027Dilation2DBackpropInput\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\022\021\n\014out_backprop\"\001T\032\020\n\013in_backprop\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\007strides\022\tlist(int)(\0010\004\"\026\n\005rates\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n;\n\003Elu\022\r\n\010features\"\001T\032\020\n\013activations\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\nL\n\007EluGrad\022\016\n\tgradients\"\001T\022\014\n\007outputs\"\001T\032\016\n\tbackprops\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\211\002\n\021FractionalAvgPool\022\n\n\005value\"\001T\032\013\n\006output\"\001T\032\030\n\024row_pooling_sequence\030\t\032\030\n\024col_pooling_sequence\030\t\" 
\n\rpooling_ratio\022\013list(float)(\0010\004\"\031\n\rpseudo_random\022\004bool\032\002(\000\"\027\n\013overlapping\022\004bool\032\002(\000\"\031\n\rdeterministic\022\004bool\032\002(\000\"\017\n\004seed\022\003int\032\002\030\000\"\020\n\005seed2\022\003int\032\002\030\000\"\023\n\001T\022\004type:\010\n\0062\004\001\002\003\t\n\266\001\n\025FractionalAvgPoolGrad\022\033\n\027orig_input_tensor_shape\030\t\022\021\n\014out_backprop\"\001T\022\030\n\024row_pooling_sequence\030\t\022\030\n\024col_pooling_sequence\030\t\032\013\n\006output\"\001T\"\027\n\013overlapping\022\004bool\032\002(\000\"\023\n\001T\022\004type:\010\n\0062\004\001\002\003\t\n\211\002\n\021FractionalMaxPool\022\n\n\005value\"\001T\032\013\n\006output\"\001T\032\030\n\024row_pooling_sequence\030\t\032\030\n\024col_pooling_sequence\030\t\" \n\rpooling_ratio\022\013list(float)(\0010\004\"\031\n\rpseudo_random\022\004bool\032\002(\000\"\027\n\013overlapping\022\004bool\032\002(\000\"\031\n\rdeterministic\022\004bool\032\002(\000\"\017\n\004seed\022\003int\032\002\030\000\"\020\n\005seed2\022\003int\032\002\030\000\"\023\n\001T\022\004type:\010\n\0062\004\001\002\003\t\n\274\001\n\025FractionalMaxPoolGrad\022\017\n\norig_input\"\001T\022\020\n\013orig_output\"\001T\022\021\n\014out_backprop\"\001T\022\030\n\024row_pooling_sequence\030\t\022\030\n\024col_pooling_sequence\030\t\032\013\n\006output\"\001T\"\027\n\013overlapping\022\004bool\032\002(\000\"\023\n\001T\022\004type:\010\n\0062\004\001\002\003\t\n\230\002\n\016FusedBatchNorm\022\006\n\001x\"\001T\022\n\n\005scale\"\001T\022\013\n\006offset\"\001T\022\t\n\004mean\"\001T\022\r\n\010variance\"\001T\032\006\n\001y\"\001T\032\017\n\nbatch_mean\"\001T\032\023\n\016batch_variance\"\001T\032\024\n\017reserve_space_1\"\001T\032\024\n\017reserve_space_2\"\001T\"\020\n\001T\022\004type:\005\n\0032\001\001\"\027\n\007epsilon\022\005float\032\005%\027\267\3218\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\027
\n\013is_training\022\004bool\032\002(\001\n\300\002\n\022FusedBatchNormGrad\022\017\n\ny_backprop\"\001T\022\006\n\001x\"\001T\022\n\n\005scale\"\001T\022\024\n\017reserve_space_1\"\001T\022\024\n\017reserve_space_2\"\001T\032\017\n\nx_backprop\"\001T\032\023\n\016scale_backprop\"\001T\032\024\n\017offset_backprop\"\001T\032\024\n\017reserve_space_3\"\001T\032\024\n\017reserve_space_4\"\001T\"\020\n\001T\022\004type:\005\n\0032\001\001\"\027\n\007epsilon\022\005float\032\005%\027\267\3218\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\027\n\013is_training\022\004bool\032\002(\001\n\325\002\n\024FusedBatchNormGradV2\022\017\n\ny_backprop\"\001T\022\006\n\001x\"\001T\022\t\n\005scale\030\001\022\024\n\017reserve_space_1\"\001U\022\024\n\017reserve_space_2\"\001U\032\017\n\nx_backprop\"\001T\032\023\n\016scale_backprop\"\001U\032\024\n\017offset_backprop\"\001U\032\024\n\017reserve_space_3\"\001U\032\024\n\017reserve_space_4\"\001U\"\022\n\001T\022\004type:\007\n\0052\003\023\016\001\"\020\n\001U\022\004type:\005\n\0032\001\001\"\027\n\007epsilon\022\005float\032\005%\027\267\3218\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\027\n\013is_training\022\004bool\032\002(\001\n\256\002\n\020FusedBatchNormV2\022\006\n\001x\"\001T\022\n\n\005scale\"\001U\022\013\n\006offset\"\001U\022\t\n\004mean\"\001U\022\r\n\010variance\"\001U\032\006\n\001y\"\001T\032\017\n\nbatch_mean\"\001U\032\023\n\016batch_variance\"\001U\032\024\n\017reserve_space_1\"\001U\032\024\n\017reserve_space_2\"\001U\"\022\n\001T\022\004type:\007\n\0052\003\023\016\001\"\020\n\001U\022\004type:\005\n\0032\001\001\"\027\n\007epsilon\022\005float\032\005%\027\267\3218\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\027\n\013is_training\022\004bool\032\002(\001\n\272\001\n\016FusedPadConv2D\022\n\n\005input\"\001T\022\014\n\010paddings\030\003\022\013\n\006filter\"\001T\032\013\n\
006output\"\001T\"\022\n\001T\022\004type:\007\n\0052\003\023\001\002\"&\n\004mode\022\006string:\026\n\024\022\007REFLECT\022\tSYMMETRIC\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\357\001\n\027FusedResizeAndPadConv2D\022\n\n\005input\"\001T\022\010\n\004size\030\003\022\014\n\010paddings\030\003\022\013\n\006filter\"\001T\032\013\n\006output\"\001T\"\022\n\001T\022\004type:\007\n\0052\003\023\001\002\" \n\024resize_align_corners\022\004bool\032\002(\000\"&\n\004mode\022\006string:\026\n\024\022\007REFLECT\022\tSYMMETRIC\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\nW\n\006InTopK\022\017\n\013predictions\030\001\022\014\n\007targets\"\001T\032\r\n\tprecision\030\n\"\010\n\001k\022\003int\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\nW\n\010InTopKV2\022\017\n\013predictions\030\001\022\014\n\007targets\"\001T\022\006\n\001k\"\001T\032\r\n\tprecision\030\n\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\n2\n\006L2Loss\022\006\n\001t\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\222\001\n\003LRN\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\027\n\014depth_radius\022\003int\032\002\030\005\"\024\n\004bias\022\005float\032\005%\000\000\200?\"\025\n\005alpha\022\005float\032\005%\000\000\200?\"\024\n\004beta\022\005float\032\005%\000\000\000?\"\026\n\001T\022\004type\032\0020\001:\007\n\0052\003\023\016\001\n\301\001\n\007LRNGrad\022\020\n\013input_grads\"\001T\022\020\n\013input_image\"\001T\022\021\n\014output_image\"\001T\032\013\n\006output\"\001T\"\027\n\014depth_radius\022\003int\032\002\030\005\"\024\n\004bias\022\005float\032\005%\000\000\200?\"\025\n\005alpha\022\005float\032\005%\000\000\200?\"\024\n\004beta\022\005float\032\005%\000\000\000?\"\026\n\001T\022\004type\032\0020\001:\007\n\0052\003\023\016\001\n?\n\nLogSoftmax\022\013\n\006logits\"\001T\032\017\n\nlogsoftmax\
"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\324\001\n\007MaxPool\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\036\n\001T\022\004type\032\0020\001:\017\n\r2\013\023\016\001\002\003\t\004\005\006\021\013\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\":\n\013data_format\022\006string\032\006\022\004NHWC:\033\n\031\022\004NHWC\022\004NCHW\022\013NCHW_VECT_C\n\300\001\n\tMaxPool3D\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\005\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"\022\n\001T\022\004type:\007\n\0052\003\023\016\001\n\221\002\n\rMaxPool3DGrad\022\024\n\norig_input\"\006TInput\022\025\n\013orig_output\"\006TInput\022\t\n\004grad\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\005\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"\026\n\001T\022\004type\032\0020\001:\007\n\0052\003\023\016\001\"\033\n\006TInput\022\004type\032\0020\001:\007\n\0052\003\023\016\001\n\363\001\n\021MaxPool3DGradGrad\022\017\n\norig_input\"\001T\022\020\n\013orig_output\"\001T\022\t\n\004grad\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\005\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\356\001\n\013MaxPoolGrad\022\017\n\norig_input\"\001T\022\020\n\013orig_output\"\001T\022\t\n\004grad\"\001T\032\013\n\006output\"\001T
\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\037\n\001T\022\004type\032\0020\001:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\356\001\n\017MaxPoolGradGrad\022\017\n\norig_input\"\001T\022\020\n\013orig_output\"\001T\022\t\n\004grad\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\326\001\n\021MaxPoolGradGradV2\022\017\n\norig_input\"\001T\022\020\n\013orig_output\"\001T\022\t\n\004grad\"\001T\022\t\n\005ksize\030\003\022\013\n\007strides\030\003\032\013\n\006output\"\001T\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\336\001\n\031MaxPoolGradGradWithArgmax\022\n\n\005input\"\001T\022\t\n\004grad\"\001T\022\021\n\006argmax\"\007Targmax\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"\027\n\007Targmax\022\004type:\006\n\0042\002\003\t\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\326\001\n\rMaxPoolGradV2\022\017\n\norig_input\"\001T\022\020\n\013orig_output\"\001T\022\t\n\004grad\"\001T\022\t\n\005ksize\030\003\022\013\n\007strides\030\003\032\013\n\006output\"\001T\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\
016\n\014\022\004NHWC\022\004NCHW\"\037\n\001T\022\004type\032\0020\001:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\332\001\n\025MaxPoolGradWithArgmax\022\n\n\005input\"\001T\022\t\n\004grad\"\001T\022\021\n\006argmax\"\007Targmax\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"\027\n\007Targmax\022\004type:\006\n\0042\002\003\t\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\274\001\n\tMaxPoolV2\022\n\n\005input\"\001T\022\t\n\005ksize\030\003\022\013\n\007strides\030\003\032\013\n\006output\"\001T\"\036\n\001T\022\004type\032\0020\001:\017\n\r2\013\023\016\001\002\003\t\004\005\006\021\013\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\":\n\013data_format\022\006string\032\006\022\004NHWC:\033\n\031\022\004NHWC\022\004NCHW\022\013NCHW_VECT_C\n\317\001\n\021MaxPoolWithArgmax\022\n\n\005input\"\001T\032\013\n\006output\"\001T\032\021\n\006argmax\"\007Targmax\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\033\n\007Targmax\022\004type\032\0020\t:\006\n\0042\002\003\t\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n^\n\nNthElement\022\n\n\005input\"\001T\022\005\n\001n\030\003\032\013\n\006values\"\001T\"\023\n\007reverse\022\004bool\032\002(\000\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\315\001\n\020QuantizedAvgPool\022\n\n\005input\"\001T\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\032\013\n\006output\"\001T\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\024\n\001T\022\004type:\t\n\0072\005\013\014\r\017\020\"\022\n\005ksize\022\tlist(int)\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\231\003\
n)QuantizedBatchNormWithGlobalNormalization\022\013\n\001t\"\006Tinput\022\t\n\005t_min\030\001\022\t\n\005t_max\030\001\022\013\n\001m\"\006Tinput\022\t\n\005m_min\030\001\022\t\n\005m_max\030\001\022\013\n\001v\"\006Tinput\022\t\n\005v_min\030\001\022\t\n\005v_max\030\001\022\016\n\004beta\"\006Tinput\022\014\n\010beta_min\030\001\022\014\n\010beta_max\030\001\022\017\n\005gamma\"\006Tinput\022\r\n\tgamma_min\030\001\022\r\n\tgamma_max\030\001\032\022\n\006result\"\010out_type\032\016\n\nresult_min\030\001\032\016\n\nresult_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\033\n\010out_type\022\004type:\t\n\0072\005\013\014\r\017\020\"\031\n\020variance_epsilon\022\005float\"!\n\031scale_after_normalization\022\004bool\n\336\001\n\020QuantizedBiasAdd\022\013\n\005input\"\002T1\022\n\n\004bias\"\002T2\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\014\n\010min_bias\030\001\022\014\n\010max_bias\030\001\032\022\n\006output\"\010out_type\032\013\n\007min_out\030\001\032\013\n\007max_out\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\033\n\010out_type\022\004type:\t\n\0072\005\013\014\r\017\020\n\333\002\n\017QuantizedConv2D\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" 
\n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\315\001\n\020QuantizedMaxPool\022\n\n\005input\"\001T\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\032\013\n\006output\"\001T\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\024\n\001T\022\004type:\t\n\0072\005\013\014\r\017\020\"\022\n\005ksize\022\tlist(int)\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\306\001\n\rQuantizedRelu\022\022\n\010features\"\006Tinput\022\020\n\014min_features\030\001\022\020\n\014max_features\030\001\032\027\n\013activations\"\010out_type\032\023\n\017min_activations\030\001\032\023\n\017max_activations\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\n\307\001\n\016QuantizedRelu6\022\022\n\010features\"\006Tinput\022\020\n\014min_features\030\001\022\020\n\014max_features\030\001\032\027\n\013activations\"\010out_type\032\023\n\017min_activations\030\001\032\023\n\017max_activations\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\n\326\001\n\016QuantizedReluX\022\022\n\010features\"\006Tinput\022\r\n\tmax_value\030\001\022\020\n\014min_features\030\001\022\020\n\014max_features\030\001\032\027\n\013activations\"\010out_type\032\023\n\017min_activations\030\001\032\023\n\017max_activations\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\nE\n\004Relu\022\r\n\010features\"\001T\032\020\n\013activations\"\001T\"\034\n\001T\022\004type:\021\n\0172\r\001\002\003\004\005\006\t\016\021\023\026\027\013\nE\n\005Relu6\022\r\n\010features\"\001T\032\020\n\013activations\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\nW\n\tRelu6Grad\022\016\n\tgradients\"\001T\022\r\n
\010features\"\001T\032\016\n\tbackprops\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\nV\n\010ReluGrad\022\016\n\tgradients\"\001T\022\r\n\010features\"\001T\032\016\n\tbackprops\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n<\n\004Selu\022\r\n\010features\"\001T\032\020\n\013activations\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\nM\n\010SeluGrad\022\016\n\tgradients\"\001T\022\014\n\007outputs\"\001T\032\016\n\tbackprops\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n9\n\007Softmax\022\013\n\006logits\"\001T\032\014\n\007softmax\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\nj\n\035SoftmaxCrossEntropyWithLogits\022\r\n\010features\"\001T\022\013\n\006labels\"\001T\032\t\n\004loss\"\001T\032\r\n\010backprop\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n@\n\010Softplus\022\r\n\010features\"\001T\032\020\n\013activations\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\nR\n\014SoftplusGrad\022\016\n\tgradients\"\001T\022\r\n\010features\"\001T\032\016\n\tbackprops\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n@\n\010Softsign\022\r\n\010features\"\001T\032\020\n\013activations\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\nR\n\014SoftsignGrad\022\016\n\tgradients\"\001T\022\r\n\010features\"\001T\032\016\n\tbackprops\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\223\001\n#SparseSoftmaxCrossEntropyWithLogits\022\r\n\010features\"\001T\022\021\n\006labels\"\007Tlabels\032\t\n\004loss\"\001T\032\r\n\010backprop\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\033\n\007Tlabels\022\004type\032\0020\t:\006\n\0042\002\003\t\n\201\001\n\004TopK\022\n\n\005input\"\001T\032\013\n\006values\"\001T\032\013\n\007indices\030\003\"\n\n\001k\022\003int(\001\"\022\n\006sorted\022\004bool\032\002(\001\"\033\n\001T\022\004type:\0
20\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027B\026\010\007\022\022Use TopKV2 instead\nf\n\006TopKV2\022\n\n\005input\"\001T\022\005\n\001k\030\003\032\013\n\006values\"\001T\032\013\n\007indices\030\003\"\022\n\006sorted\022\004bool\032\002(\001\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027")